/* iAudioTrackSelection accessor: this service object implements the
   interface itself, so just hand out a reference to ourselves. */
RESULT eServiceMP3::audioTracks(ePtr<iAudioTrackSelection> &ptr)
{
	ptr = this;
	return 0;
}
+
/* iSubtitleOutput accessor: this service object implements the
   interface itself, so just hand out a reference to ourselves. */
RESULT eServiceMP3::subtitle(ePtr<iSubtitleOutput> &ptr)
{
	ptr = this;
	return 0;
}
+
/* iAudioDelay accessor: this service object implements the interface
   itself, so just hand out a reference to ourselves. */
RESULT eServiceMP3::audioDelay(ePtr<iAudioDelay> &ptr)
{
	ptr = this;
	return 0;
}
+
/* Number of audio streams found in the pipeline (the list is filled in
   by the GST_MESSAGE_ASYNC_DONE handler in gstBusCall).
   Note: implicitly narrows size_t to int. */
int eServiceMP3::getNumberOfTracks()
{
	return m_audioStreams.size();
}
+
+int eServiceMP3::getCurrentTrack()
+{
+ if (m_currentAudioStream == -1)
+ g_object_get (G_OBJECT (m_gst_playbin), "current-audio", &m_currentAudioStream, NULL);
+ return m_currentAudioStream;
+}
+
+RESULT eServiceMP3::selectTrack(unsigned int i)
+{
+ pts_t ppos;
+ getPlayPosition(ppos);
+ ppos -= 90000;
+ if (ppos < 0)
+ ppos = 0;
+
+ int ret = selectAudioStream(i);
+ if (!ret) {
+ /* flush */
+ seekTo(ppos);
+ }
+
+ return ret;
+}
+
+int eServiceMP3::selectAudioStream(int i)
+{
+ int current_audio;
+ g_object_set (G_OBJECT (m_gst_playbin), "current-audio", i, NULL);
+ g_object_get (G_OBJECT (m_gst_playbin), "current-audio", ¤t_audio, NULL);
+ if ( current_audio == i )
+ {
+ eDebug ("eServiceMP3::switched to audio stream %i", current_audio);
+ m_currentAudioStream = i;
+ return 0;
+ }
+ return -1;
+}
+
/* Channel mode reporting is not implemented for GStreamer playback;
   always report stereo. */
int eServiceMP3::getCurrentChannel()
{
	return STEREO;
}
+
/* Channel selection (left/right/stereo) is a stub for this service:
   log the request and report success without changing anything. */
RESULT eServiceMP3::selectChannel(int i)
{
	eDebug("eServiceMP3::selectChannel(%i)",i);
	return 0;
}
+
+RESULT eServiceMP3::getTrackInfo(struct iAudioTrackInfo &info, unsigned int i)
+{
+ if (i >= m_audioStreams.size())
+ return -2;
+ info.m_description = m_audioStreams[i].codec;
+/* if (m_audioStreams[i].type == atMPEG)
+ info.m_description = "MPEG";
+ else if (m_audioStreams[i].type == atMP3)
+ info.m_description = "MP3";
+ else if (m_audioStreams[i].type == atAC3)
+ info.m_description = "AC3";
+ else if (m_audioStreams[i].type == atAAC)
+ info.m_description = "AAC";
+ else if (m_audioStreams[i].type == atDTS)
+ info.m_description = "DTS";
+ else if (m_audioStreams[i].type == atPCM)
+ info.m_description = "PCM";
+ else if (m_audioStreams[i].type == atOGG)
+ info.m_description = "OGG";
+ else if (m_audioStreams[i].type == atFLAC)
+ info.m_description = "FLAC";
+ else
+ info.m_description = "???";*/
+ if (info.m_language.empty())
+ info.m_language = m_audioStreams[i].language_code;
+ return 0;
+}
+
+void eServiceMP3::gstBusCall(GstBus *bus, GstMessage *msg)
+{
+ if (!msg)
+ return;
+ gchar *sourceName;
+ GstObject *source;
+
+ source = GST_MESSAGE_SRC(msg);
+ sourceName = gst_object_get_name(source);
+#if 0
+ if (gst_message_get_structure(msg))
+ {
+ gchar *string = gst_structure_to_string(gst_message_get_structure(msg));
+ eDebug("eServiceMP3::gst_message from %s: %s", sourceName, string);
+ g_free(string);
+ }
+ else
+ eDebug("eServiceMP3::gst_message from %s: %s (without structure)", sourceName, GST_MESSAGE_TYPE_NAME(msg));
+#endif
+ switch (GST_MESSAGE_TYPE (msg))
+ {
+ case GST_MESSAGE_EOS:
+ m_event((iPlayableService*)this, evEOF);
+ break;
+ case GST_MESSAGE_STATE_CHANGED:
+ {
+ if(GST_MESSAGE_SRC(msg) != GST_OBJECT(m_gst_playbin))
+ break;
+
+ GstState old_state, new_state;
+ gst_message_parse_state_changed(msg, &old_state, &new_state, NULL);
+
+ if(old_state == new_state)
+ break;
+
+ eDebug("eServiceMP3::state transition %s -> %s", gst_element_state_get_name(old_state), gst_element_state_get_name(new_state));
+
+ GstStateChange transition = (GstStateChange)GST_STATE_TRANSITION(old_state, new_state);
+
+ switch(transition)
+ {
+ case GST_STATE_CHANGE_NULL_TO_READY:
+ {
+ } break;
+ case GST_STATE_CHANGE_READY_TO_PAUSED:
+ {
+ GstElement *sink;
+ g_object_get (G_OBJECT (m_gst_playbin), "text-sink", &sink, NULL);
+ if (sink)
+ {
+ g_object_set (G_OBJECT (sink), "max-buffers", 2, NULL);
+ g_object_set (G_OBJECT (sink), "sync", FALSE, NULL);
+ g_object_set (G_OBJECT (sink), "async", FALSE, NULL);
+ g_object_set (G_OBJECT (sink), "emit-signals", TRUE, NULL);
+ gst_object_unref(sink);
+ }
+ setAC3Delay(ac3_delay);
+ setPCMDelay(pcm_delay);
+ } break;
+ case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
+ {
+ } break;
+ case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
+ {
+ } break;
+ case GST_STATE_CHANGE_PAUSED_TO_READY:
+ {
+ } break;
+ case GST_STATE_CHANGE_READY_TO_NULL:
+ {
+ } break;
+ }
+ break;
+ }
+ case GST_MESSAGE_ERROR:
+ {
+ gchar *debug;
+ GError *err;
+ gst_message_parse_error (msg, &err, &debug);
+ g_free (debug);
+ eWarning("Gstreamer error: %s (%i) from %s", err->message, err->code, sourceName );
+ if ( err->domain == GST_STREAM_ERROR )
+ {
+ if ( err->code == GST_STREAM_ERROR_CODEC_NOT_FOUND )
+ {
+ if ( g_strrstr(sourceName, "videosink") )
+ m_event((iPlayableService*)this, evUser+11);
+ else if ( g_strrstr(sourceName, "audiosink") )
+ m_event((iPlayableService*)this, evUser+10);
+ }
+ }
+ g_error_free(err);
+ break;
+ }
+ case GST_MESSAGE_INFO:
+ {
+ gchar *debug;
+ GError *inf;
+
+ gst_message_parse_info (msg, &inf, &debug);
+ g_free (debug);
+ if ( inf->domain == GST_STREAM_ERROR && inf->code == GST_STREAM_ERROR_DECODE )
+ {
+ if ( g_strrstr(sourceName, "videosink") )
+ m_event((iPlayableService*)this, evUser+14);
+ }
+ g_error_free(inf);
+ break;
+ }
+ case GST_MESSAGE_TAG:
+ {
+ GstTagList *tags, *result;
+ gst_message_parse_tag(msg, &tags);
+
+ result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_REPLACE);
+ if (result)
+ {
+ if (m_stream_tags)
+ gst_tag_list_free(m_stream_tags);
+ m_stream_tags = result;
+ }
+
+ const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
+ if ( gv_image )
+ {
+ GstBuffer *buf_image;
+ buf_image = gst_value_get_buffer (gv_image);
+ int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
+ int ret = write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
+ close(fd);
+ eDebug("eServiceMP3::/tmp/.id3coverart %d bytes written ", ret);
+ m_event((iPlayableService*)this, evUser+13);
+ }
+ gst_tag_list_free(tags);
+ m_event((iPlayableService*)this, evUpdatedInfo);
+ break;
+ }
+ case GST_MESSAGE_ASYNC_DONE:
+ {
+ if(GST_MESSAGE_SRC(msg) != GST_OBJECT(m_gst_playbin))
+ break;
+
+ GstTagList *tags;
+ gint i, active_idx, n_video = 0, n_audio = 0, n_text = 0;
+
+ g_object_get (m_gst_playbin, "n-video", &n_video, NULL);
+ g_object_get (m_gst_playbin, "n-audio", &n_audio, NULL);
+ g_object_get (m_gst_playbin, "n-text", &n_text, NULL);
+
+ eDebug("eServiceMP3::async-done - %d video, %d audio, %d subtitle", n_video, n_audio, n_text);
+
+ if ( n_video + n_audio <= 0 )
+ stop();
+
+ active_idx = 0;
+
+ m_audioStreams.clear();
+ m_subtitleStreams.clear();
+
+ for (i = 0; i < n_audio; i++)
+ {
+ audioStream audio;
+ gchar *g_codec, *g_lang;
+ GstPad* pad = 0;
+ g_signal_emit_by_name (m_gst_playbin, "get-audio-pad", i, &pad);
+ GstCaps* caps = gst_pad_get_negotiated_caps(pad);
+ if (!caps)
+ continue;
+ GstStructure* str = gst_caps_get_structure(caps, 0);
+ const gchar *g_type = gst_structure_get_name(str);
+ eDebug("AUDIO STRUCT=%s", g_type);
+ audio.type = gstCheckAudioPad(str);
+ g_codec = g_strdup(g_type);
+ g_lang = g_strdup_printf ("und");
+ g_signal_emit_by_name (m_gst_playbin, "get-audio-tags", i, &tags);
+ if ( tags && gst_is_tag_list(tags) )
+ {
+ gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_codec);
+ gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_lang);
+ gst_tag_list_free(tags);
+ }
+ audio.language_code = std::string(g_lang);
+ audio.codec = std::string(g_codec);
+ eDebug("eServiceMP3::audio stream=%i codec=%s language=%s", i, g_codec, g_lang);
+ m_audioStreams.push_back(audio);
+ g_free (g_lang);
+ g_free (g_codec);
+ gst_caps_unref(caps);
+ }
+
+ for (i = 0; i < n_text; i++)
+ {
+ gchar *g_lang;
+// gchar *g_type;
+// GstPad* pad = 0;
+// g_signal_emit_by_name (m_gst_playbin, "get-text-pad", i, &pad);
+// GstCaps* caps = gst_pad_get_negotiated_caps(pad);
+// GstStructure* str = gst_caps_get_structure(caps, 0);
+// g_type = gst_structure_get_name(str);
+// g_signal_emit_by_name (m_gst_playbin, "get-text-tags", i, &tags);
+ subtitleStream subs;
+ subs.type = stPlainText;
+ g_lang = g_strdup_printf ("und");
+ if ( tags && gst_is_tag_list(tags) )
+ gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_lang);
+ subs.language_code = std::string(g_lang);
+ eDebug("eServiceMP3::subtitle stream=%i language=%s"/* type=%s*/, i, g_lang/*, g_type*/);
+ m_subtitleStreams.push_back(subs);
+ g_free (g_lang);
+// g_free (g_type);
+ }
+ m_event((iPlayableService*)this, evUpdatedEventInfo);
+ }
+ case GST_MESSAGE_ELEMENT:
+ {
+ if ( gst_is_missing_plugin_message(msg) )
+ {
+ gchar *description = gst_missing_plugin_message_get_description(msg);
+ if ( description )
+ {
+ m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
+ g_free(description);
+ m_event((iPlayableService*)this, evUser+12);
+ }
+ }
+ else if (const GstStructure *msgstruct = gst_message_get_structure(msg))
+ {
+ const gchar *eventname = gst_structure_get_name(msgstruct);
+ if ( eventname )
+ {
+ if (!strcmp(eventname, "eventSizeChanged") || !strcmp(eventname, "eventSizeAvail"))
+ {
+ gst_structure_get_int (msgstruct, "aspect_ratio", &m_aspect);
+ gst_structure_get_int (msgstruct, "width", &m_width);
+ gst_structure_get_int (msgstruct, "height", &m_height);
+ if (strstr(eventname, "Changed"))
+ m_event((iPlayableService*)this, evVideoSizeChanged);
+ }
+ else if (!strcmp(eventname, "eventFrameRateChanged") || !strcmp(eventname, "eventFrameRateAvail"))
+ {
+ gst_structure_get_int (msgstruct, "frame_rate", &m_framerate);
+ if (strstr(eventname, "Changed"))
+ m_event((iPlayableService*)this, evVideoFramerateChanged);
+ }
+ else if (!strcmp(eventname, "eventProgressiveChanged") || !strcmp(eventname, "eventProgressiveAvail"))
+ {
+ gst_structure_get_int (msgstruct, "progressive", &m_progressive);
+ if (strstr(eventname, "Changed"))
+ m_event((iPlayableService*)this, evVideoProgressiveChanged);
+ }
+ }
+ }
+ break;
+ }
+ case GST_MESSAGE_BUFFERING:
+ {
+ GstBufferingMode mode;
+ gst_message_parse_buffering(msg, &(m_bufferInfo.bufferPercent));
+ gst_message_parse_buffering_stats(msg, &mode, &(m_bufferInfo.avgInRate), &(m_bufferInfo.avgOutRate), &(m_bufferInfo.bufferingLeft));
+ m_event((iPlayableService*)this, evBuffering);
+ }
+ default:
+ break;
+ }
+ g_free (sourceName);
+}
+
/* Bus sync handler; runs on a GStreamer streaming thread. Only wakes
   the enigma2 main loop via m_pump - the message itself is processed
   later by gstPoll()/gstBusCall() on the main thread. GST_BUS_PASS
   leaves the message queued on the bus for that later pop. */
GstBusSyncReply eServiceMP3::gstBusSyncHandler(GstBus *bus, GstMessage *message, gpointer user_data)
{
	eServiceMP3 *_this = (eServiceMP3*)user_data;
	_this->m_pump.send(1);
	/* wake */
	return GST_BUS_PASS;
}
+
+audiotype_t eServiceMP3::gstCheckAudioPad(GstStructure* structure)
+{
+ if (!structure)
+ return atUnknown;
+
+ if ( gst_structure_has_name (structure, "audio/mpeg"))
+ {
+ gint mpegversion, layer = -1;
+ if (!gst_structure_get_int (structure, "mpegversion", &mpegversion))
+ return atUnknown;
+
+ switch (mpegversion) {
+ case 1:
+ {
+ gst_structure_get_int (structure, "layer", &layer);
+ if ( layer == 3 )
+ return atMP3;
+ else
+ return atMPEG;
+ break;
+ }
+ case 2:
+ return atAAC;
+ case 4:
+ return atAAC;
+ default:
+ return atUnknown;
+ }
+ }
+
+ else if ( gst_structure_has_name (structure, "audio/x-ac3") || gst_structure_has_name (structure, "audio/ac3") )
+ return atAC3;
+ else if ( gst_structure_has_name (structure, "audio/x-dts") || gst_structure_has_name (structure, "audio/dts") )
+ return atDTS;
+ else if ( gst_structure_has_name (structure, "audio/x-raw-int") )
+ return atPCM;
+
+ return atUnknown;
+}
+
+void eServiceMP3::gstPoll(const int &msg)
+{
+ /* ok, we have a serious problem here. gstBusSyncHandler sends
+ us the wakup signal, but likely before it was posted.
+ the usleep, an EVIL HACK (DON'T DO THAT!!!) works around this.
+
+ I need to understand the API a bit more to make this work
+ proplerly. */
+ if (msg == 1)
+ {
+ GstBus *bus = gst_pipeline_get_bus (GST_PIPELINE (m_gst_playbin));
+ GstMessage *message;
+ usleep(1);
+ while ((message = gst_bus_pop (bus)))
+ {
+ gstBusCall(bus, message);
+ gst_message_unref (message);
+ }
+ }
+ else
+ pullSubtitle();
+}
+