X-Git-Url: https://git.cweiske.de/enigma2.git/blobdiff_plain/34b5be4a2cc2095806cf1860dc02360f546a306a..819285a4572823e343f0d1ab88e2c68c2caf2677:/lib/service/servicemp3.cpp

diff --git a/lib/service/servicemp3.cpp b/lib/service/servicemp3.cpp
index a738a5a8..9c1972d7 100644
--- a/lib/service/servicemp3.cpp
+++ b/lib/service/servicemp3.cpp
@@ -16,6 +16,7 @@
 #include /* for subtitles */
 #include
+#include
 
 // eServiceFactoryMP3
 
@@ -27,6 +28,7 @@ eServiceFactoryMP3::eServiceFactoryMP3()
 	if (sc)
 	{
 		std::list<std::string> extensions;
+		extensions.push_back("mp2");
 		extensions.push_back("mp3");
 		extensions.push_back("ogg");
 		extensions.push_back("mpg");
@@ -35,8 +37,10 @@ eServiceFactoryMP3::eServiceFactoryMP3()
 		extensions.push_back("wave");
 		extensions.push_back("mkv");
 		extensions.push_back("avi");
+		extensions.push_back("divx");
 		extensions.push_back("dat");
 		extensions.push_back("flac");
+		extensions.push_back("mp4");
 		sc->addServiceFactory(eServiceFactoryMP3::id, this, extensions);
 	}
 
@@ -173,14 +177,13 @@ int eStaticServiceMP3Info::getLength(const eServiceReference &ref)
 eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eApp, 1)
 {
+	m_seekTimeout = eTimer::create(eApp);
 	m_stream_tags = 0;
-	m_audioStreams.clear();
-	m_subtitleStreams.clear();
 	m_currentAudioStream = 0;
 	m_currentSubtitleStream = 0;
 	m_subtitle_widget = 0;
 	m_currentTrickRatio = 0;
-	CONNECT(m_seekTimeout.timeout, eServiceMP3::seekTimeoutCB);
+	CONNECT(m_seekTimeout->timeout, eServiceMP3::seekTimeoutCB);
 	CONNECT(m_pump.recv_msg, eServiceMP3::gstPoll);
 	GstElement *source = 0;
@@ -198,169 +201,169 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
 	if (!ext)
 		ext = filename;
 
-	int is_mpeg_ps = !(strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat"));
-	int is_mpeg_ts = !strcasecmp(ext, ".ts");
-	int is_matroska = !strcasecmp(ext, ".mkv");
-	int is_avi = !strcasecmp(ext, ".avi");
-	int is_mp3 = !strcasecmp(ext, ".mp3"); /* force mp3 instead of decodebin */
-	int is_video = is_mpeg_ps || is_mpeg_ts || is_matroska || is_avi;
-	int is_streaming = !strncmp(filename, "http://", 7);
-	int is_AudioCD = !(strncmp(filename, "/autofs/", 8) || strncmp(filename+strlen(filename)-13, "/track-", 7) || strcasecmp(ext, ".wav"));
-	int is_VCD = !strcasecmp(ext, ".dat");
-
-	eDebug("filename: %s, is_mpeg_ps: %d, is_mpeg_ts: %d, is_video: %d, is_streaming: %d, is_mp3: %d, is_matroska: %d, is_avi: %d, is_AudioCD: %d, is_VCD: %d", filename, is_mpeg_ps, is_mpeg_ts, is_video, is_streaming, is_mp3, is_matroska, is_avi, is_AudioCD, is_VCD);
-
-	int is_audio = !is_video;
+	sourceStream sourceinfo;
+	if ( (strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat") ) == 0 )
+		sourceinfo.containertype = ctMPEGPS;
+	else if ( strcasecmp(ext, ".ts") == 0 )
+		sourceinfo.containertype = ctMPEGTS;
+	else if ( strcasecmp(ext, ".mkv") == 0 )
+		sourceinfo.containertype = ctMKV;
+	else if ( strcasecmp(ext, ".avi") == 0 || strcasecmp(ext, ".divx") == 0)
+		sourceinfo.containertype = ctAVI;
+	else if ( strcasecmp(ext, ".mp4") == 0 )
+		sourceinfo.containertype = ctMP4;
+	else if ( (strncmp(filename, "/autofs/", 8) || strncmp(filename+strlen(filename)-13, "/track-", 7) || strcasecmp(ext, ".wav")) == 0 )
+		sourceinfo.containertype = ctCDA;
+	if ( strcasecmp(ext, ".dat") == 0 )
+		sourceinfo.containertype = ctVCD;
+	if ( (strncmp(filename, "http://", 7)) == 0 )
+		sourceinfo.is_streaming = TRUE;
+
+	sourceinfo.is_video = ( sourceinfo.containertype && sourceinfo.containertype != ctCDA );
+
+	eDebug("filename=%s, containertype=%d, is_video=%d, is_streaming=%d", filename, sourceinfo.containertype, sourceinfo.is_video, sourceinfo.is_streaming);
 
 	int all_ok = 0;
 	m_gst_pipeline = gst_pipeline_new ("mediaplayer");
 	if (!m_gst_pipeline)
-		eWarning("failed to create pipeline");
+		m_error_message = "failed to create GStreamer pipeline!\n";
 
-	if (is_AudioCD)
+	if ( sourceinfo.is_streaming )
+	{
+		eDebug("play webradio!");
+		source = gst_element_factory_make ("neonhttpsrc", "http-source");
+		if (source)
+		{
+			g_object_set (G_OBJECT (source), "location", filename, NULL);
+			g_object_set (G_OBJECT (source), "automatic-redirect", TRUE, NULL);
+		}
+		else
+			m_error_message = "GStreamer plugin neonhttpsrc not available!\n";
+	}
+	else if ( sourceinfo.containertype == ctCDA )
 	{
 		source = gst_element_factory_make ("cdiocddasrc", "cda-source");
 		if (source)
+		{
 			g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
+			int track = atoi(filename+18);
+			eDebug("play audio CD track #%i",track);
+			if (track > 0)
+				g_object_set (G_OBJECT (source), "track", track, NULL);
+		}
 		else
-			is_AudioCD = 0;
+			sourceinfo.containertype = ctNone;
 	}
-	if ( !is_streaming && !is_AudioCD )
-		source = gst_element_factory_make ("filesrc", "file-source");
-	else if ( is_streaming )
+	if ( !sourceinfo.is_streaming && sourceinfo.containertype != ctCDA )
 	{
-		source = gst_element_factory_make ("neonhttpsrc", "http-source");
+		source = gst_element_factory_make ("filesrc", "file-source");
 		if (source)
-			g_object_set (G_OBJECT (source), "automatic-redirect", TRUE, NULL);
+			g_object_set (G_OBJECT (source), "location", filename, NULL);
+		else
+			m_error_message = "GStreamer can't open filesrc " + (std::string)filename + "!\n";
 	}
+	if ( sourceinfo.is_video )
+	{
+		/* filesrc -> mpegdemux -> | queue_audio -> dvbaudiosink
+		                           | queue_video -> dvbvideosink */
 
-	if (!source)
-		eWarning("failed to create %s", is_streaming ? "neonhttpsrc" : "filesrc");
-	/* configure source */
-	else if (!is_AudioCD)
-		g_object_set (G_OBJECT (source), "location", filename, NULL);
-	else
-	{
-		int track = atoi(filename+18);
-		eDebug("play audio CD track #%i",track);
-		if (track > 0)
-			g_object_set (G_OBJECT (source), "track", track, NULL);
-	}
+		audio = gst_element_factory_make("dvbaudiosink", "audiosink");
+		if (!audio)
+			m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+
+		video = gst_element_factory_make("dvbvideosink", "videosink");
+		if (!video)
+			m_error_message += "failed to create Gstreamer element dvbvideosink\n";
+
+		queue_audio = gst_element_factory_make("queue", "queue_audio");
+		queue_video = gst_element_factory_make("queue", "queue_video");
 
-	if (is_audio)
+		std::string demux_type;
+		switch (sourceinfo.containertype)
+		{
+			case ctMPEGTS:
+				demux_type = "flutsdemux";
+				break;
+			case ctMPEGPS:
+			case ctVCD:
+				demux_type = "flupsdemux";
+				break;
+			case ctMKV:
+				demux_type = "matroskademux";
+				break;
+			case ctAVI:
+				demux_type = "avidemux";
+				break;
+			case ctMP4:
+				demux_type = "qtdemux";
+				break;
+			default:
+				break;
+		}
+		videodemux = gst_element_factory_make(demux_type.c_str(), "videodemux");
+		if (!videodemux)
+			m_error_message = "GStreamer plugin " + demux_type + " not available!\n";
+
+		switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
+		if (!switch_audio)
+			m_error_message = "GStreamer plugin input-selector not available!\n";
+
+		if (audio && queue_audio && video && queue_video && videodemux && switch_audio)
+		{
+			g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 256*1024, NULL);
+			g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 0, NULL);
+			g_object_set (G_OBJECT (queue_audio), "max-size-time", (guint64)0, NULL);
+			g_object_set (G_OBJECT (queue_video), "max-size-buffers", 0, NULL);
+			g_object_set (G_OBJECT (queue_video), "max-size-bytes", 2*1024*1024, NULL);
+			g_object_set (G_OBJECT (queue_video), "max-size-time", (guint64)0, NULL);
+			g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
+			all_ok = 1;
+		}
+	} else /* is audio */
 	{
-		/* filesrc -> decodebin -> audioconvert -> capsfilter -> alsasink */
-		const char *decodertype = "decodebin";
-		decoder = gst_element_factory_make (decodertype, "decoder");
+		/* filesrc -> decodebin -> audioconvert -> capsfilter -> alsasink */
+		decoder = gst_element_factory_make ("decodebin", "decoder");
 		if (!decoder)
-			eWarning("failed to create %s decoder", decodertype);
+			m_error_message += "failed to create Gstreamer element decodebin\n";
 		conv = gst_element_factory_make ("audioconvert", "converter");
 		if (!conv)
-			eWarning("failed to create audioconvert");
+			m_error_message += "failed to create Gstreamer element audioconvert\n";
 		flt = gst_element_factory_make ("capsfilter", "flt");
 		if (!flt)
-			eWarning("failed to create capsfilter");
+			m_error_message += "failed to create Gstreamer element capsfilter\n";
 		/* for some reasons, we need to set the sample format to depth/width=16, because auto negotiation doesn't work. */
 		/* endianness, however, is not required to be set anymore. */
 		if (flt)
 		{
-			GstCaps *caps = gst_caps_new_simple("audio/x-raw-int", /* "endianness", G_TYPE_INT, 4321, */ "depth", G_TYPE_INT, 16, "width", G_TYPE_INT, 16, /*"channels", G_TYPE_INT, 2, */(char*)0);
-			g_object_set (G_OBJECT (flt), "caps", caps, (char*)0);
+			GstCaps *caps = gst_caps_new_simple("audio/x-raw-int", /* "endianness", G_TYPE_INT, 4321, */ "depth", G_TYPE_INT, 16, "width", G_TYPE_INT, 16, /*"channels", G_TYPE_INT, 2, */NULL);
+			g_object_set (G_OBJECT (flt), "caps", caps, NULL);
 			gst_caps_unref(caps);
 		}
 		sink = gst_element_factory_make ("alsasink", "alsa-output");
 		if (!sink)
-			eWarning("failed to create osssink");
+			m_error_message += "failed to create Gstreamer element alsasink\n";
 
 		if (source && decoder && conv && sink)
 			all_ok = 1;
-	} else /* is_video */
-	{
-		/* filesrc -> mpegdemux -> | queue_audio -> dvbaudiosink
-		                           | queue_video -> dvbvideosink */
-
-		audio = gst_element_factory_make("dvbaudiosink", "audiosink");
-		queue_audio = gst_element_factory_make("queue", "queue_audio");
-
-		video = gst_element_factory_make("dvbvideosink", "videosink");
-		queue_video = gst_element_factory_make("queue", "queue_video");
-
-		if (is_mpeg_ps)
-			videodemux = gst_element_factory_make("flupsdemux", "videodemux");
-		else if (is_mpeg_ts)
-			videodemux = gst_element_factory_make("flutsdemux", "videodemux");
-		else if (is_matroska)
-			videodemux = gst_element_factory_make("matroskademux", "videodemux");
-		else if (is_avi)
-			videodemux = gst_element_factory_make("avidemux", "videodemux");
-
-		if (!videodemux)
-		{
-			eDebug("fluendo mpegdemux not available, falling back to mpegdemux\n");
-			videodemux = gst_element_factory_make("mpegdemux", "videodemux");
-		}
-
-		eDebug("audio: %p, queue_audio %p, video %p, queue_video %p, videodemux %p", audio, queue_audio, video, queue_video, videodemux);
-		if (audio && queue_audio && video && queue_video && videodemux)
-		{
-			g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 256*1024, NULL);
-			g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 0, NULL);
-			g_object_set (G_OBJECT (queue_audio), "max-size-time", (guint64)0, NULL);
-			g_object_set (G_OBJECT (queue_video), "max-size-buffers", 0, NULL);
-			g_object_set (G_OBJECT (queue_video), "max-size-bytes", 2*1024*1024, NULL);
-			g_object_set (G_OBJECT (queue_video), "max-size-time", (guint64)0, NULL);
-			all_ok = 1;
-		}
 	}
-
 	if (m_gst_pipeline && all_ok)
 	{
 		gst_bus_set_sync_handler(gst_pipeline_get_bus (GST_PIPELINE (m_gst_pipeline)), gstBusSyncHandler, this);
 
-		if (is_AudioCD)
+		if ( sourceinfo.containertype == ctCDA )
 		{
 			queue_audio = gst_element_factory_make("queue", "queue_audio");
 			g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
 			gst_bin_add_many (GST_BIN (m_gst_pipeline), source, queue_audio, conv, sink, NULL);
 			gst_element_link_many(source, queue_audio, conv, sink, NULL);
 		}
-		else if (is_audio)
-		{
-			queue_audio = gst_element_factory_make("queue", "queue_audio");
-
-			g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
-			g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
-
-			if (!is_mp3)
-				g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
-
-			/* gst_bin will take the 'floating references' */
-			gst_bin_add_many (GST_BIN (m_gst_pipeline),
-						source, queue_audio, decoder, NULL);
-
-			/* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
-			gst_element_link_many(source, queue_audio, decoder, NULL);
-
-			/* create audio bin with the audioconverter, the capsfilter and the audiosink */
-			audio = gst_bin_new ("audiobin");
-
-			GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
-			gst_bin_add_many(GST_BIN(audio), conv, flt, sink, (char*)0);
-			gst_element_link_many(conv, flt, sink, (char*)0);
-			gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
-			gst_object_unref(audiopad);
-			gst_bin_add (GST_BIN(m_gst_pipeline), audio);
-			/* in mad's case, we can directly connect the decoder to the audiobin. otherwise, we do this in gstCBnewPad */
-			if (is_mp3)
-				gst_element_link(decoder, audio);
-
-		} else /* is_video */
+		else if ( sourceinfo.is_video )
 		{
 			char srt_filename[strlen(filename)+1];
 			strncpy(srt_filename,filename,strlen(filename)-3);
@@ -372,32 +375,18 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
 				eDebug("subtitle file found: %s",srt_filename);
 				GstElement *subsource = gst_element_factory_make ("filesrc", "srt_source");
 				g_object_set (G_OBJECT (subsource), "location", srt_filename, NULL);
-				GstElement *parser = gst_element_factory_make("subparse", "parse_subtitles");
-				GstElement *switch_subtitles = gst_element_factory_make ("input-selector", "switch_subtitles");
-				GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
-				gst_bin_add_many(GST_BIN (m_gst_pipeline), subsource, switch_subtitles, parser, sink, NULL);
-				gst_element_link(subsource, switch_subtitles);
-				gst_element_link(switch_subtitles, parser);
-				gst_element_link(parser, sink);
-				g_object_set (G_OBJECT(switch_subtitles), "select-all", TRUE, NULL);
-				g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
-				g_object_set (G_OBJECT(sink), "sync", TRUE, NULL);
-				g_object_set (G_OBJECT(parser), "subtitle-encoding", "ISO-8859-15", NULL);
-				g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), this);
+				gst_bin_add(GST_BIN (m_gst_pipeline), subsource);
+				GstPad *switchpad = gstCreateSubtitleSink(this, stSRT);
+				gst_pad_link(gst_element_get_pad (subsource, "src"), switchpad);
 				subtitleStream subs;
-				subs.language_code = std::string(".srt file");
+				subs.pad = switchpad;
+				subs.type = stSRT;
+				subs.language_code = std::string("und");
 				m_subtitleStreams.push_back(subs);
 			}
-			gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, NULL);
-			switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
-			if (switch_audio)
-			{
-				g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
-				gst_bin_add(GST_BIN(m_gst_pipeline), switch_audio);
-				gst_element_link(switch_audio, queue_audio);
-			}
+			gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, switch_audio, NULL);
 
-			if (is_VCD)
+			if ( sourceinfo.containertype == ctVCD )
 			{
 				GstElement *cdxaparse = gst_element_factory_make("cdxaparse", "cdxaparse");
 				gst_bin_add(GST_BIN(m_gst_pipeline), cdxaparse);
@@ -406,12 +395,42 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
 			}
 			else
 				gst_element_link(source, videodemux);
+
+			gst_element_link(switch_audio, queue_audio);
 			gst_element_link(queue_audio, audio);
 			gst_element_link(queue_video, video);
 			g_signal_connect(videodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+
+		} else /* is audio*/
+		{
+			queue_audio = gst_element_factory_make("queue", "queue_audio");
+
+			g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
+			g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
+
+			g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
+
+			/* gst_bin will take the 'floating references' */
+			gst_bin_add_many (GST_BIN (m_gst_pipeline),
+						source, queue_audio, decoder, NULL);
+
+			/* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
+			gst_element_link_many(source, queue_audio, decoder, NULL);
+
+			/* create audio bin with the audioconverter, the capsfilter and the audiosink */
+			audio = gst_bin_new ("audiobin");
+
+			GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
+			gst_bin_add_many(GST_BIN(audio), conv, flt, sink, NULL);
+			gst_element_link_many(conv, flt, sink, NULL);
+			gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
+			gst_object_unref(audiopad);
+			gst_bin_add (GST_BIN(m_gst_pipeline), audio);
 		}
 	}
 	else
 	{
+		m_event((iPlayableService*)this, evUser+12);
+
 		if (m_gst_pipeline)
 			gst_object_unref(GST_OBJECT(m_gst_pipeline));
 		if (source)
@@ -436,10 +455,10 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
 		if (switch_audio)
 			gst_object_unref(GST_OBJECT(switch_audio));
 
-		eDebug("sorry, can't play.");
+		eDebug("sorry, can't play: %s",m_error_message.c_str());
 		m_gst_pipeline = 0;
 	}
-
+
 	gst_element_set_state (m_gst_pipeline, GST_STATE_PLAYING);
 }
 
@@ -513,9 +532,9 @@ RESULT eServiceMP3::setFastForward(int ratio)
 {
 	m_currentTrickRatio = ratio;
 	if (ratio)
-		m_seekTimeout.start(1000, 0);
+		m_seekTimeout->start(1000, 0);
 	else
-		m_seekTimeout.stop();
+		m_seekTimeout->stop();
 	return 0;
 }
 
@@ -529,13 +548,13 @@ void eServiceMP3::seekTimeoutCB()
 	if (ppos < 0)
 	{
 		ppos = 0;
-		m_seekTimeout.stop();
+		m_seekTimeout->stop();
 	}
 	if (ppos > len)
 	{
 		ppos = 0;
 		stop();
-		m_seekTimeout.stop();
+		m_seekTimeout->stop();
 		return;
 	}
 	seekTo(ppos);
@@ -681,6 +700,7 @@ int eServiceMP3::getInfo(int w)
 	case sTracknumber:
 	case sGenre:
 	case sVideoType:
+	case sTimeCreate:
 	case sUser+12:
 		return resIsString;
 	case sCurrentTitle:
@@ -706,6 +726,8 @@ int eServiceMP3::getInfo(int w)
 
 std::string eServiceMP3::getInfoString(int w)
 {
+	if ( !m_stream_tags )
+		return "";
 	gchar *tag = 0;
 	switch (w)
 	{
@@ -730,24 +752,29 @@ std::string eServiceMP3::getInfoString(int w)
 	case sVideoType:
 		tag = GST_TAG_VIDEO_CODEC;
 		break;
+	case sTimeCreate:
+		GDate *date;
+		if (gst_tag_list_get_date(m_stream_tags, GST_TAG_DATE, &date))
+		{
+			gchar res[5];
+			g_date_strftime (res, sizeof(res), "%Y", date);
+			return (std::string)res;
+		}
+		break;
 	case sUser+12:
 		return m_error_message;
 	default:
 		return "";
 	}
-
-	if (!m_stream_tags || !tag)
+	if ( !tag )
 		return "";
-
 	gchar *value;
-
 	if (gst_tag_list_get_string(m_stream_tags, tag, &value))
 	{
 		std::string res = value;
 		g_free(value);
 		return res;
 	}
-
 	return "";
 }
 
@@ -918,6 +945,20 @@ void eServiceMP3::gstBusCall(GstBus *bus, GstMessage *msg)
 				audio.type = gstCheckAudioPad(str);
 				m_audioStreams.push_back(audio);
 			}
+
+			GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
+			if ( gv_image )
+			{
+				GstBuffer *buf_image;
+				buf_image = gst_value_get_buffer (gv_image);
+				int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
+				int ret = write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
+				close(fd);
+				m_event((iPlayableService*)this, evUser+13);
+			}
+
+			gst_tag_list_free(tags);
+			m_event((iPlayableService*)this, evUpdatedInfo);
 			break;
 		}
 		case GST_MESSAGE_ASYNC_DONE:
@@ -956,10 +997,10 @@ void eServiceMP3::gstBusCall(GstBus *bus, GstMessage *msg)
 		{
 			if ( gst_is_missing_plugin_message(msg) )
 			{
-				gchar *description = gst_missing_plugin_message_get_description(msg);
+				gchar *description = gst_missing_plugin_message_get_description(msg);
 				if ( description )
 				{
-					m_error_message = description;
+					m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
 					g_free(description);
 					m_event((iPlayableService*)this, evUser+12);
 				}
@@ -1063,28 +1104,74 @@ void eServiceMP3::gstCBpadAdded(GstElement *decodebin, GstPad *pad, gpointer use
 	}
 	if (g_strrstr(type,"application/x-ssa") || g_strrstr(type,"application/x-ass"))
 	{
-		GstElement *switch_subtitles = gst_bin_get_by_name(pipeline,"switch_subtitles");
-		if ( !switch_subtitles )
-		{
-			switch_subtitles = gst_element_factory_make ("input-selector", "switch_subtitles");
-			if ( !switch_subtitles )
-				return;
-			GstElement *parser = gst_element_factory_make("ssaparse", "parse_subtitles");
-			GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
-			gst_bin_add_many(pipeline, switch_subtitles, parser, sink, NULL);
-			gst_element_link(switch_subtitles, parser);
-			gst_element_link(parser, sink);
-			g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
-			g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), _this);
-		}
-		GstPad *sinkpad = gst_element_get_request_pad (switch_subtitles, "sink%d");
-		gst_pad_link(pad, sinkpad);
+		GstPad *switchpad = _this->gstCreateSubtitleSink(_this, stSSA);
+		gst_pad_link(pad, switchpad);
+		subtitleStream subs;
+		subs.pad = switchpad;
+		subs.type = stSSA;
+		_this->m_subtitleStreams.push_back(subs);
+	}
+	if (g_strrstr(type,"text/plain"))
+	{
+		GstPad *switchpad = _this->gstCreateSubtitleSink(_this, stPlainText);
+		gst_pad_link(pad, switchpad);
 		subtitleStream subs;
-		subs.pad = sinkpad;
+		subs.pad = switchpad;
+		subs.type = stPlainText;
 		_this->m_subtitleStreams.push_back(subs);
 	}
 }
 
+GstPad* eServiceMP3::gstCreateSubtitleSink(eServiceMP3* _this, subtype_t type)
+{
+	GstBin *pipeline = GST_BIN(_this->m_gst_pipeline);
+	GstElement *switch_subparse = gst_bin_get_by_name(pipeline,"switch_subparse");
+	if ( !switch_subparse )
+	{
+		switch_subparse = gst_element_factory_make ("input-selector", "switch_subparse");
+		GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
+		gst_bin_add_many(pipeline, switch_subparse, sink, NULL);
+		gst_element_link(switch_subparse, sink);
+		g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
+		g_object_set (G_OBJECT(sink), "sync", TRUE, NULL);
+		g_object_set (G_OBJECT(sink), "async", FALSE, NULL);
+		g_signal_connect(sink, "handoff", G_CALLBACK(_this->gstCBsubtitleAvail), _this);
+
+		// order is essential since requested sink pad names can't be explicitely chosen
+		GstElement *switch_substream_plain = gst_element_factory_make ("input-selector", "switch_substream_plain");
+		gst_bin_add(pipeline, switch_substream_plain);
+		GstPad *sinkpad_plain = gst_element_get_request_pad (switch_subparse, "sink%d");
+		gst_pad_link(gst_element_get_pad (switch_substream_plain, "src"), sinkpad_plain);
+
+		GstElement *switch_substream_ssa = gst_element_factory_make ("input-selector", "switch_substream_ssa");
+		GstElement *ssaparse = gst_element_factory_make("ssaparse", "ssaparse");
+		gst_bin_add_many(pipeline, switch_substream_ssa, ssaparse, NULL);
+		GstPad *sinkpad_ssa = gst_element_get_request_pad (switch_subparse, "sink%d");
+		gst_element_link(switch_substream_ssa, ssaparse);
+		gst_pad_link(gst_element_get_pad (ssaparse, "src"), sinkpad_ssa);
+
+		GstElement *switch_substream_srt = gst_element_factory_make ("input-selector", "switch_substream_srt");
+		GstElement *srtparse = gst_element_factory_make("subparse", "srtparse");
+		gst_bin_add_many(pipeline, switch_substream_srt, srtparse, NULL);
+		GstPad *sinkpad_srt = gst_element_get_request_pad (switch_subparse, "sink%d");
+		gst_element_link(switch_substream_srt, srtparse);
+		gst_pad_link(gst_element_get_pad (srtparse, "src"), sinkpad_srt);
+		g_object_set (G_OBJECT(srtparse), "subtitle-encoding", "ISO-8859-15", NULL);
+	}
+
+	switch (type)
+	{
+		case stSSA:
+			return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_ssa"), "sink%d");
+		case stSRT:
+			return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_srt"), "sink%d");
+		case stPlainText:
+		default:
+			break;
+	}
+	return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_plain"), "sink%d");
+}
+
 void eServiceMP3::gstCBfilterPadAdded(GstElement *filter, GstPad *pad, gpointer user_data)
 {
 	eServiceMP3 *_this = (eServiceMP3*)user_data;
@@ -1157,28 +1244,30 @@ eAutoInitPtr init_eServiceFactoryMP3(eAutoInitNumbers::servi
 void eServiceMP3::gstCBsubtitleAvail(GstElement *element, GstBuffer *buffer, GstPad *pad, gpointer user_data)
 {
+	gint64 duration_ns = GST_BUFFER_DURATION(buffer);
 	const unsigned char *text = (unsigned char *)GST_BUFFER_DATA(buffer);
 	eDebug("gstCBsubtitleAvail: %s",text);
 	eServiceMP3 *_this = (eServiceMP3*)user_data;
 	if ( _this->m_subtitle_widget )
 	{
-		eDVBTeletextSubtitlePage page;
+		ePangoSubtitlePage page;
 		gRGB rgbcol(0xD0,0xD0,0xD0);
-		page.m_elements.push_back(eDVBTeletextSubtitlePageElement(rgbcol, (const char*)text));
+		page.m_elements.push_back(ePangoSubtitlePageElement(rgbcol, (const char*)text));
+		page.m_timeout = duration_ns / 1000000;
 		(_this->m_subtitle_widget)->setPage(page);
 	}
 }
 
 RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
 {
-	eDebug("eServiceMP3::enableSubtitles");
-
 	ePyObject entry;
 	int tuplesize = PyTuple_Size(tuple);
 	int pid;
+	int type;
 	gint nb_sources;
 	GstPad *active_pad;
-	GstElement *switch_subtitles = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_subtitles");
+	GstElement *switch_substream = NULL;
+	GstElement *switch_subparse = gst_bin_get_by_name (GST_BIN(m_gst_pipeline), "switch_subparse");
 
 	if (!PyTuple_Check(tuple))
 		goto error_out;
@@ -1188,32 +1277,52 @@ RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
 	if (!PyInt_Check(entry))
 		goto error_out;
 	pid = PyInt_AsLong(entry);
+	entry = PyTuple_GET_ITEM(tuple, 2);
+	if (!PyInt_Check(entry))
+		goto error_out;
+	type = PyInt_AsLong(entry);
+
+	switch ((subtype_t)type)
+	{
+		case stPlainText:
+			switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_plain");
+			break;
+		case stSSA:
+			switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_ssa");
+			break;
+		case stSRT:
+			switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_srt");
+			break;
+		default:
+			goto error_out;
+	}
 
 	m_subtitle_widget = new eSubtitleWidget(parent);
 	m_subtitle_widget->resize(parent->size()); /* full size */
 
-	if ( !switch_subtitles )
+	if ( !switch_substream )
 	{
 		eDebug("can't switch subtitle tracks! gst-plugin-selector needed");
 		return -2;
 	}
-	g_object_get (G_OBJECT (switch_subtitles), "n-pads", &nb_sources, NULL);
+	g_object_get (G_OBJECT (switch_substream), "n-pads", &nb_sources, NULL);
 	if ( (unsigned int)pid >= m_subtitleStreams.size() || pid >= nb_sources || (unsigned int)m_currentSubtitleStream >= m_subtitleStreams.size() )
 		return -2;
-	char sinkpad[8];
+	g_object_get (G_OBJECT (switch_subparse), "n-pads", &nb_sources, NULL);
+	if ( type < 0 || type >= nb_sources )
+		return -2;
+
+	char sinkpad[6];
+	sprintf(sinkpad, "sink%d", type);
+	g_object_set (G_OBJECT (switch_subparse), "active-pad", gst_element_get_pad (switch_subparse, sinkpad), NULL);
 	sprintf(sinkpad, "sink%d", pid);
-	g_object_set (G_OBJECT (switch_subtitles), "active-pad", gst_element_get_pad (switch_subtitles, sinkpad), NULL);
-	g_object_get (G_OBJECT (switch_subtitles), "active-pad", &active_pad, NULL);
-	gchar *name;
-	name = gst_pad_get_name (active_pad);
-	eDebug ("switched subtitles to (%s)", name);
-	g_free(name);
+	g_object_set (G_OBJECT (switch_substream), "active-pad", gst_element_get_pad (switch_substream, sinkpad), NULL);
 	m_currentSubtitleStream = pid;
 	return 0;
 error_out:
 	eDebug("enableSubtitles needs a tuple as 2nd argument!\n"
-		"for gst subtitles (2, subtitle_stream_count)");
+		"for gst subtitles (2, subtitle_stream_count, subtitle_type)");
 	return -1;
 }
 
@@ -1236,21 +1345,23 @@ PyObject *eServiceMP3::getSubtitleList()
 	eDebug("eServiceMP3::getSubtitleList");
 	ePyObject l = PyList_New(0);
-	int stream_count = 0;
+	int stream_count[sizeof(subtype_t)];
+	for ( unsigned int i = 0; i < sizeof(subtype_t); i++ )
+		stream_count[i] = 0;
 	for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
 	{
+		subtype_t type = IterSubtitleStream->type;
 		ePyObject tuple = PyTuple_New(5);
 		PyTuple_SET_ITEM(tuple, 0, PyInt_FromLong(2));
-		PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(stream_count));
-		PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(0));
+		PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(stream_count[type]));
+		PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(int(type)));
 		PyTuple_SET_ITEM(tuple, 3, PyInt_FromLong(0));
 		PyTuple_SET_ITEM(tuple, 4, PyString_FromString((IterSubtitleStream->language_code).c_str()));
 		PyList_Append(l, tuple);
 		Py_DECREF(tuple);
-		stream_count++;
+		stream_count[type]++;
 	}
-
 	return l;
 }
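For plain audio files the patched constructor keeps the old decodebin chain (filesrc -> queue -> decodebin -> audiobin, where the audiobin wraps audioconvert -> capsfilter -> alsasink behind a ghost pad), while video files now go filesrc -> demuxer -> input-selector -> queue -> dvbaudiosink / queue -> dvbvideosink. The following standalone sketch mirrors only the audio branch with stock GStreamer 0.10 elements; it is illustrative and not part of the patch, and the element set (filesrc, decodebin, audioconvert, alsasink) is an assumption about what is available on the target system.

    /* Illustrative sketch (not from the patch): the decodebin-based audio chain
     * reduced to a standalone GStreamer 0.10 program. */
    #include <gst/gst.h>

    static void on_new_decoded_pad(GstElement *decodebin, GstPad *pad,
                                   gboolean last, gpointer user_data)
    {
        /* decodebin exposes its decoded pad only at runtime, so the link to the
         * audio bin happens here -- the same job gstCBnewPad does in the patch */
        GstElement *audiobin = GST_ELEMENT(user_data);
        GstPad *sinkpad = gst_element_get_static_pad(audiobin, "sink");
        if (!gst_pad_is_linked(sinkpad))
            gst_pad_link(pad, sinkpad);
        gst_object_unref(sinkpad);
    }

    int main(int argc, char **argv)
    {
        gst_init(&argc, &argv);
        if (argc < 2)
            return 1;

        GstElement *pipeline = gst_pipeline_new("mediaplayer");
        GstElement *source  = gst_element_factory_make("filesrc", "file-source");
        GstElement *queue   = gst_element_factory_make("queue", "queue_audio");
        GstElement *decoder = gst_element_factory_make("decodebin", "decoder");
        GstElement *conv    = gst_element_factory_make("audioconvert", "converter");
        GstElement *flt     = gst_element_factory_make("capsfilter", "flt");
        GstElement *sink    = gst_element_factory_make("alsasink", "alsa-output");
        if (!pipeline || !source || !queue || !decoder || !conv || !flt || !sink)
            return 1;

        g_object_set(G_OBJECT(source), "location", argv[1], NULL);

        /* force 16 bit samples, as the patch does, because auto negotiation
         * does not always pick a format the sink accepts */
        GstCaps *caps = gst_caps_new_simple("audio/x-raw-int",
                                            "depth", G_TYPE_INT, 16,
                                            "width", G_TYPE_INT, 16, NULL);
        g_object_set(G_OBJECT(flt), "caps", caps, NULL);
        gst_caps_unref(caps);

        /* audiobin wraps converter -> filter -> sink behind a ghost "sink" pad */
        GstElement *audiobin = gst_bin_new("audiobin");
        gst_bin_add_many(GST_BIN(audiobin), conv, flt, sink, NULL);
        gst_element_link_many(conv, flt, sink, NULL);
        GstPad *audiopad = gst_element_get_static_pad(conv, "sink");
        gst_element_add_pad(audiobin, gst_ghost_pad_new("sink", audiopad));
        gst_object_unref(audiopad);

        gst_bin_add_many(GST_BIN(pipeline), source, queue, decoder, audiobin, NULL);
        gst_element_link_many(source, queue, decoder, NULL);
        g_signal_connect(decoder, "new-decoded-pad",
                         G_CALLBACK(on_new_decoded_pad), audiobin);

        gst_element_set_state(pipeline, GST_STATE_PLAYING);
        g_main_loop_run(g_main_loop_new(NULL, FALSE));
        return 0;
    }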
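gstCreateSubtitleSink() lazily builds one input-selector per subtitle format (plain text, SSA, SRT), each feeding the shared "switch_subparse" selector -- plain text directly, SSA through ssaparse, SRT through subparse -- which ends in a fakesink whose "handoff" signal delivers the rendered text to gstCBsubtitleAvail. Because input-selector numbers its request pads in creation order, the branches are created in a fixed order so that the pad index on switch_subparse matches the subtype_t value that enableSubtitles() later formats into "sink%d" (presumably stPlainText=0, stSSA=1, stSRT=2; the enum values are not visible in this hunk and are an assumption). A tiny standalone check of that numbering, again only a sketch:

    #include <gst/gst.h>

    int main(int argc, char **argv)
    {
        gst_init(&argc, &argv);
        GstElement *sel = gst_element_factory_make("input-selector", "switch_subparse");
        if (!sel)
            return 1;
        /* pads come back as sink0, sink1, sink2 in request order */
        GstPad *p0 = gst_element_get_request_pad(sel, "sink%d"); /* plain text branch */
        GstPad *p1 = gst_element_get_request_pad(sel, "sink%d"); /* SSA branch */
        GstPad *p2 = gst_element_get_request_pad(sel, "sink%d"); /* SRT branch */
        g_print("%s %s %s\n", GST_PAD_NAME(p0), GST_PAD_NAME(p1), GST_PAD_NAME(p2));
        return 0;
    }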
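The new tag handling in gstBusCall() also pulls the first GST_TAG_IMAGE entry out of the tag list, writes the raw buffer to /tmp/.id3coverart and notifies the UI with evUser+13. The helper below is a hypothetical standalone rewrite of just that extraction (the function name dump_coverart is not from the patch), using the same GStreamer 0.10 tag-list calls:

    /* Hypothetical helper (not in the patch): dump the first GST_TAG_IMAGE entry
     * of a tag list to a file, as the new gstBusCall code does. Returns 0 on success. */
    #include <fcntl.h>
    #include <unistd.h>
    #include <gst/gst.h>

    static int dump_coverart(const GstTagList *tags, const char *path)
    {
        const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
        if (!gv_image)
            return -1;
        GstBuffer *buf = gst_value_get_buffer(gv_image); /* still owned by the tag list */
        int fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
        if (fd < 0)
            return -1;
        ssize_t ret = write(fd, GST_BUFFER_DATA(buf), GST_BUFFER_SIZE(buf));
        close(fd);
        return ret == (ssize_t)GST_BUFFER_SIZE(buf) ? 0 : -1;
    }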