X-Git-Url: https://git.cweiske.de/enigma2.git/blobdiff_plain/0a2edb202a5a94dc97d016457b841b1eb7df1f02..f14fe40f1fc2d6ba0c993bc0dabbfe250428aaa1:/lib/service/servicemp3.cpp

diff --git a/lib/service/servicemp3.cpp b/lib/service/servicemp3.cpp
index 2d217731..86b7696f 100644
--- a/lib/service/servicemp3.cpp
+++ b/lib/service/servicemp3.cpp
@@ -187,7 +187,7 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
 	CONNECT(m_pump.recv_msg, eServiceMP3::gstPoll);
 	GstElement *source = 0;
 	GstElement *decoder = 0, *conv = 0, *flt = 0, *parser = 0, *sink = 0; /* for audio */
-	GstElement *audio = 0, *switch_audio = 0, *queue_audio = 0, *video = 0, *queue_video = 0, *videodemux = 0, *audiodemux = 0;
+	GstElement *audio = 0, *switch_audio = 0, *queue_audio = 0, *video = 0, *queue_video = 0, *videodemux = 0, *audiodemux = 0, *id3demux;
 
 	m_state = stIdle;
 	eDebug("SERVICEMP3 construct!");
@@ -373,16 +373,19 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
 	{
 		case atMP3:
 		{
-			if ( !audiodemux )
+			id3demux = gst_element_factory_make("id3demux", "id3demux");
+			if ( !id3demux )
 			{
-				parser = gst_element_factory_make("mp3parse", "audioparse");
-				if (!parser)
-				{
-					m_error_message += "failed to create Gstreamer element mp3parse\n";
-					break;
-				}
+				m_error_message += "failed to create Gstreamer element id3demux\n";
+				break;
 			}
-			sink = gst_element_factory_make("dvbaudiosink", "audiosink");
+			parser = gst_element_factory_make("mp3parse", "audiosink");
+			if ( !parser )
+			{
+				m_error_message += "failed to create Gstreamer element mp3parse\n";
+				break;
+			}
+			sink = gst_element_factory_make("dvbaudiosink", "audiosink2");
 			if ( !sink )
 				m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
 			else
@@ -532,17 +535,18 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
 	else
 	{
 		gst_bin_add_many (GST_BIN (m_gst_pipeline), source, sink, NULL);
-		if ( parser )
+		if ( parser && id3demux )
 		{
-			gst_bin_add (GST_BIN (m_gst_pipeline), parser);
-			gst_element_link_many(source, parser, sink, NULL);
+			gst_bin_add_many (GST_BIN (m_gst_pipeline), parser, id3demux, NULL);
+			gst_element_link(source, id3demux);
+			g_signal_connect(id3demux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+			gst_element_link(parser, sink);
 		}
 		if ( audiodemux )
 		{
 			gst_bin_add (GST_BIN (m_gst_pipeline), audiodemux);
 			g_signal_connect(audiodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
 			gst_element_link(source, audiodemux);
-			eDebug("linked source, audiodemux, sink");
 		}
 		audioStream audio;
 		audio.type = sourceinfo.audiotype;
@@ -1022,114 +1026,155 @@ void eServiceMP3::gstBusCall(GstBus *bus, GstMessage *msg)
 #endif
 	switch (GST_MESSAGE_TYPE (msg))
 	{
-	case GST_MESSAGE_EOS:
-		m_event((iPlayableService*)this, evEOF);
-		break;
-	case GST_MESSAGE_ERROR:
-	{
-		gchar *debug;
-		GError *err;
-
-		gst_message_parse_error (msg, &err, &debug);
-		g_free (debug);
-		eWarning("Gstreamer error: %s (%i)", err->message, err->code );
-		if ( err->domain == GST_STREAM_ERROR && err->code == GST_STREAM_ERROR_DECODE )
-		{
-			if ( g_strrstr(sourceName, "videosink") )
-				m_event((iPlayableService*)this, evUser+11);
-		}
-		g_error_free(err);
-		/* TODO: signal error condition to user */
-		break;
-	}
-	case GST_MESSAGE_TAG:
-	{
-		GstTagList *tags, *result;
-		gst_message_parse_tag(msg, &tags);
-
-		result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_PREPEND);
-		if (result)
+		case GST_MESSAGE_EOS:
+			m_event((iPlayableService*)this, evEOF);
+			break;
+		case GST_MESSAGE_ERROR:
 		{
-			if (m_stream_tags)
-				gst_tag_list_free(m_stream_tags);
-			m_stream_tags = result;
+			gchar *debug;
+			GError *err;
+
+			gst_message_parse_error (msg, &err, &debug);
+			g_free (debug);
+			eWarning("Gstreamer error: %s (%i)", err->message, err->code );
+			if ( err->domain == GST_STREAM_ERROR && err->code == GST_STREAM_ERROR_CODEC_NOT_FOUND )
+			{
+				if ( g_strrstr(sourceName, "videosink") )
+					m_event((iPlayableService*)this, evUser+11);
+			}
+			g_error_free(err);
+			break;
 		}
-
-		gchar *g_audiocodec;
-		if ( gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_audiocodec) && m_audioStreams.size() == 0 )
+		case GST_MESSAGE_INFO:
 		{
-			GstPad* pad = gst_element_get_pad (GST_ELEMENT(source), "src");
-			GstCaps* caps = gst_pad_get_caps(pad);
-			GstStructure* str = gst_caps_get_structure(caps, 0);
-			if ( !str )
-				break;
-			audioStream audio;
-			audio.type = gstCheckAudioPad(str);
-			m_audioStreams.push_back(audio);
+			gchar *debug;
+			GError *inf;
+
+			gst_message_parse_info (msg, &inf, &debug);
+			g_free (debug);
+			if ( inf->domain == GST_STREAM_ERROR && inf->code == GST_STREAM_ERROR_DECODE )
+			{
+				if ( g_strrstr(sourceName, "videosink") )
+					m_event((iPlayableService*)this, evUser+14);
+			}
+			g_error_free(inf);
+			break;
 		}
-
-		GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
-		if ( gv_image )
+		case GST_MESSAGE_TAG:
 		{
-			GstBuffer *buf_image;
-			buf_image = gst_value_get_buffer (gv_image);
-			int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
-			int ret = write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
-			close(fd);
-			m_event((iPlayableService*)this, evUser+13);
+			GstTagList *tags, *result;
+			gst_message_parse_tag(msg, &tags);
+
+			result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_PREPEND);
+			if (result)
+			{
+				if (m_stream_tags)
+					gst_tag_list_free(m_stream_tags);
+				m_stream_tags = result;
+			}
+
+			gchar *g_audiocodec;
+			if ( gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_audiocodec) && m_audioStreams.size() == 0 )
+			{
+				GstPad* pad = gst_element_get_pad (GST_ELEMENT(source), "src");
+				GstCaps* caps = gst_pad_get_caps(pad);
+				GstStructure* str = gst_caps_get_structure(caps, 0);
+				if ( !str )
+					break;
+				audioStream audio;
+				audio.type = gstCheckAudioPad(str);
+				m_audioStreams.push_back(audio);
+			}
+
+			const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
+			if ( gv_image )
+			{
+				GstBuffer *buf_image;
+				buf_image = gst_value_get_buffer (gv_image);
+				int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
+				write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
+				close(fd);
+				m_event((iPlayableService*)this, evUser+13);
+			}
+
+			gst_tag_list_free(tags);
+			m_event((iPlayableService*)this, evUpdatedInfo);
+			break;
 		}
-
-		gst_tag_list_free(tags);
-		m_event((iPlayableService*)this, evUpdatedInfo);
-		break;
-	}
-	case GST_MESSAGE_ASYNC_DONE:
-	{
-		GstTagList *tags;
-		for (std::vector<audioStream>::iterator IterAudioStream(m_audioStreams.begin()); IterAudioStream != m_audioStreams.end(); ++IterAudioStream)
+		case GST_MESSAGE_ASYNC_DONE:
 		{
-			if ( IterAudioStream->pad )
+			GstTagList *tags;
+			for (std::vector<audioStream>::iterator IterAudioStream(m_audioStreams.begin()); IterAudioStream != m_audioStreams.end(); ++IterAudioStream)
 			{
-				g_object_get(IterAudioStream->pad, "tags", &tags, NULL);
-				gchar *g_language;
-				if ( gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+				if ( IterAudioStream->pad )
 				{
-					eDebug("found audio language %s",g_language);
-					IterAudioStream->language_code = std::string(g_language);
-					g_free (g_language);
+					g_object_get(IterAudioStream->pad, "tags", &tags, NULL);
+					gchar *g_language;
+					if ( tags && gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+					{
+						eDebug("found audio language %s",g_language);
+						IterAudioStream->language_code = std::string(g_language);
+						g_free (g_language);
+					}
 				}
 			}
-		}
-		for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
-		{
-			if ( IterSubtitleStream->pad )
+			for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
 			{
-				g_object_get(IterSubtitleStream->pad, "tags", &tags, NULL);
-				gchar *g_language;
-				if ( gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+				if ( IterSubtitleStream->pad )
 				{
-					eDebug("found subtitle language %s",g_language);
-					IterSubtitleStream->language_code = std::string(g_language);
-					g_free (g_language);
+					g_object_get(IterSubtitleStream->pad, "tags", &tags, NULL);
+					gchar *g_language;
+					if ( tags && gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+					{
+						eDebug("found subtitle language %s",g_language);
+						IterSubtitleStream->language_code = std::string(g_language);
+						g_free (g_language);
+					}
 				}
 			}
 		}
-	}
-	case GST_MESSAGE_ELEMENT:
-	{
-		if ( gst_is_missing_plugin_message(msg) )
+		case GST_MESSAGE_ELEMENT:
 		{
-			gchar *description = gst_missing_plugin_message_get_description(msg);
-			if ( description )
+			if ( gst_is_missing_plugin_message(msg) )
+			{
+				gchar *description = gst_missing_plugin_message_get_description(msg);
+				if ( description )
+				{
+					m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
+					g_free(description);
+					m_event((iPlayableService*)this, evUser+12);
+				}
+			}
+			else if (const GstStructure *msgstruct = gst_message_get_structure(msg))
 			{
-				m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
-				g_free(description);
-				m_event((iPlayableService*)this, evUser+12);
+				const gchar *eventname;
+				if ( eventname = gst_structure_get_name(msgstruct) )
+				{
+					if (!strcmp(eventname, "eventSizeChanged"))
+					{
+						gint aspect_ratio, width, height = 0;
+						gst_structure_get_int (msgstruct, "aspect_ratio", &aspect_ratio);
+						gst_structure_get_int (msgstruct, "width", &width);
+						gst_structure_get_int (msgstruct, "height", &height);
+						eDebug("****** decoder threw eventSizeChanged! aspect_ratio=%i, width=%i, height=%i", aspect_ratio, width, height);
+					}
+					if (!strcmp(eventname, "eventFrameRateChanged"))
+					{
+						gint frame_rate = 0;
+						gst_structure_get_int (msgstruct, "frame_rate", &frame_rate);
+						eDebug("****** decoder threw eventFrameRateChanged! frame_rate=%i", frame_rate);
+					}
+					if (!strcmp(eventname, "eventProgressiveChanged"))
+					{
+						gint progressive = 0;
+						gst_structure_get_int (msgstruct, "progressive", &progressive);
+						eDebug("****** decoder threw eventProgressiveChanged! progressive=%i", progressive);
+					}
+				}
 			}
 		}
-	}
-	default:
-		break;
+		default:
+			break;
 	}
 	g_free (sourceName);
 }
@@ -1217,7 +1262,7 @@ void eServiceMP3::gstCBpadAdded(GstElement *decodebin, GstPad *pad, gpointer use
 	else
 	{
 		GstElement *queue_audio = gst_bin_get_by_name(pipeline , "queue_audio");
-		if ( queue_audio)
+		if ( queue_audio )
 		{
 			gst_pad_link(pad, gst_element_get_static_pad(queue_audio, "sink"));
 			_this->m_audioStreams.push_back(audio);