+ return ret;
+}
+
+/* Switch playbin to audio stream @i and verify the switch by reading the
+   property back. Returns 0 on success, -1 if playbin did not take the stream. */
+int eServiceMP3::selectAudioStream(int i)
+{
+	int current_audio;
+	g_object_set (G_OBJECT (m_gst_playbin), "current-audio", i, NULL);
+	/* was "¤t_audio" — mojibake of "&current_audio" (mangled &curren; entity) */
+	g_object_get (G_OBJECT (m_gst_playbin), "current-audio", &current_audio, NULL);
+	if ( current_audio == i )
+	{
+		eDebug ("eServiceMP3::switched to audio stream %i", current_audio);
+		m_currentAudioStream = i;
+		return 0;
+	}
+	return -1;
+}
+
+/* Channel mode is not tracked for GStreamer playback; always report stereo. */
+int eServiceMP3::getCurrentChannel()
+{
+ return STEREO;
+}
+
+/* Channel selection is a no-op here (playbin handles downmix itself);
+   we only log the request and report success. */
+RESULT eServiceMP3::selectChannel(int i)
+{
+ eDebug("eServiceMP3::selectChannel(%i)",i);
+ return 0;
+}
+
+/* Fill @info for audio track @i from the stream list collected in gstBusCall
+   (async-done handler). Returns -2 for an out-of-range index, 0 on success. */
+RESULT eServiceMP3::getTrackInfo(struct iAudioTrackInfo &info, unsigned int i)
+{
+ if (i >= m_audioStreams.size())
+ return -2;
+ /* the raw codec string from the caps/tags is used directly; the mapping
+    below to short type names was retired but kept for reference */
+ info.m_description = m_audioStreams[i].codec;
+/* if (m_audioStreams[i].type == atMPEG)
+ info.m_description = "MPEG";
+ else if (m_audioStreams[i].type == atMP3)
+ info.m_description = "MP3";
+ else if (m_audioStreams[i].type == atAC3)
+ info.m_description = "AC3";
+ else if (m_audioStreams[i].type == atAAC)
+ info.m_description = "AAC";
+ else if (m_audioStreams[i].type == atDTS)
+ info.m_description = "DTS";
+ else if (m_audioStreams[i].type == atPCM)
+ info.m_description = "PCM";
+ else if (m_audioStreams[i].type == atOGG)
+ info.m_description = "OGG";
+ else if (m_audioStreams[i].type == atFLAC)
+ info.m_description = "FLAC";
+ else
+ info.m_description = "???";*/
+ /* only fill the language if the caller did not pre-populate it */
+ if (info.m_language.empty())
+ info.m_language = m_audioStreams[i].language_code;
+ return 0;
+}
+
+/* Dispatch one message popped from the playbin bus. Called from gstPoll in
+   main-loop context, so member state may be touched and service events
+   emitted directly. */
+void eServiceMP3::gstBusCall(GstBus *bus, GstMessage *msg)
+{
+	if (!msg)
+		return;
+	gchar *sourceName;
+	GstObject *source;
+
+	source = GST_MESSAGE_SRC(msg);
+	sourceName = gst_object_get_name(source);
+#if 0
+	if (gst_message_get_structure(msg))
+	{
+		gchar *string = gst_structure_to_string(gst_message_get_structure(msg));
+		eDebug("eServiceMP3::gst_message from %s: %s", sourceName, string);
+		g_free(string);
+	}
+	else
+		eDebug("eServiceMP3::gst_message from %s: %s (without structure)", sourceName, GST_MESSAGE_TYPE_NAME(msg));
+#endif
+	switch (GST_MESSAGE_TYPE (msg))
+	{
+		case GST_MESSAGE_EOS:
+			m_event((iPlayableService*)this, evEOF);
+			break;
+		case GST_MESSAGE_STATE_CHANGED:
+		{
+			/* only state changes of the pipeline itself are of interest */
+			if(GST_MESSAGE_SRC(msg) != GST_OBJECT(m_gst_playbin))
+				break;
+
+			GstState old_state, new_state;
+			gst_message_parse_state_changed(msg, &old_state, &new_state, NULL);
+
+			if(old_state == new_state)
+				break;
+
+			eDebug("eServiceMP3::state transition %s -> %s", gst_element_state_get_name(old_state), gst_element_state_get_name(new_state));
+
+			GstStateChange transition = (GstStateChange)GST_STATE_TRANSITION(old_state, new_state);
+
+			switch(transition)
+			{
+				case GST_STATE_CHANGE_READY_TO_PAUSED:
+				{
+					/* pipeline has prerolled: configure the subtitle appsink
+					   and (re)apply the configured audio delays */
+					GstElement *sink;
+					g_object_get (G_OBJECT (m_gst_playbin), "text-sink", &sink, NULL);
+					if (sink)
+					{
+						g_object_set (G_OBJECT (sink), "max-buffers", 2, NULL);
+						g_object_set (G_OBJECT (sink), "sync", FALSE, NULL);
+						g_object_set (G_OBJECT (sink), "async", FALSE, NULL);
+						g_object_set (G_OBJECT (sink), "emit-signals", TRUE, NULL);
+						gst_object_unref(sink);
+					}
+					setAC3Delay(ac3_delay);
+					setPCMDelay(pcm_delay);
+				} break;
+				default:
+					/* other transitions need no action; explicit default
+					   also covers values not listed in GstStateChange */
+					break;
+			}
+			break;
+		}
+		case GST_MESSAGE_ERROR:
+		{
+			gchar *debug;
+			GError *err;
+			gst_message_parse_error (msg, &err, &debug);
+			g_free (debug);
+			eWarning("Gstreamer error: %s (%i) from %s", err->message, err->code, sourceName );
+			if ( err->domain == GST_STREAM_ERROR )
+			{
+				if ( err->code == GST_STREAM_ERROR_CODEC_NOT_FOUND )
+				{
+					/* tell the UI which decoder is missing */
+					if ( g_strrstr(sourceName, "videosink") )
+						m_event((iPlayableService*)this, evUser+11);
+					else if ( g_strrstr(sourceName, "audiosink") )
+						m_event((iPlayableService*)this, evUser+10);
+				}
+			}
+			g_error_free(err);
+			break;
+		}
+		case GST_MESSAGE_INFO:
+		{
+			gchar *debug;
+			GError *inf;
+
+			gst_message_parse_info (msg, &inf, &debug);
+			g_free (debug);
+			if ( inf->domain == GST_STREAM_ERROR && inf->code == GST_STREAM_ERROR_DECODE )
+			{
+				if ( g_strrstr(sourceName, "videosink") )
+					m_event((iPlayableService*)this, evUser+14);
+			}
+			g_error_free(inf);
+			break;
+		}
+		case GST_MESSAGE_TAG:
+		{
+			GstTagList *tags, *result;
+			gst_message_parse_tag(msg, &tags);
+
+			/* accumulate stream tags; newer values replace older ones */
+			result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_REPLACE);
+			if (result)
+			{
+				if (m_stream_tags)
+					gst_tag_list_free(m_stream_tags);
+				m_stream_tags = result;
+			}
+
+			const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
+			if ( gv_image )
+			{
+				/* dump embedded cover art for the UI to pick up */
+				GstBuffer *buf_image;
+				buf_image = gst_value_get_buffer (gv_image);
+				int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
+				if (fd >= 0) /* was unchecked: write(-1, ...) on open failure */
+				{
+					int ret = write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
+					close(fd);
+					eDebug("eServiceMP3::/tmp/.id3coverart %d bytes written ", ret);
+				}
+				m_event((iPlayableService*)this, evUser+13);
+			}
+			gst_tag_list_free(tags);
+			m_event((iPlayableService*)this, evUpdatedInfo);
+			break;
+		}
+		case GST_MESSAGE_ASYNC_DONE:
+		{
+			if(GST_MESSAGE_SRC(msg) != GST_OBJECT(m_gst_playbin))
+				break;
+
+			/* was uninitialized: read below even when no audio stream sets it */
+			GstTagList *tags = NULL;
+			gint i, n_video = 0, n_audio = 0, n_text = 0;
+
+			g_object_get (m_gst_playbin, "n-video", &n_video, NULL);
+			g_object_get (m_gst_playbin, "n-audio", &n_audio, NULL);
+			g_object_get (m_gst_playbin, "n-text", &n_text, NULL);
+
+			eDebug("eServiceMP3::async-done - %d video, %d audio, %d subtitle", n_video, n_audio, n_text);
+
+			/* nothing playable at all -> give up */
+			if ( n_video + n_audio <= 0 )
+				stop();
+
+			m_audioStreams.clear();
+			m_subtitleStreams.clear();
+
+			for (i = 0; i < n_audio; i++)
+			{
+				audioStream audio;
+				gchar *g_codec, *g_lang;
+				GstPad* pad = 0;
+				g_signal_emit_by_name (m_gst_playbin, "get-audio-pad", i, &pad);
+				GstCaps* caps = pad ? gst_pad_get_negotiated_caps(pad) : NULL;
+				if (pad)
+					gst_object_unref(pad); /* get-audio-pad hands out a reference; was leaked */
+				if (!caps)
+					continue;
+				GstStructure* str = gst_caps_get_structure(caps, 0);
+				const gchar *g_type = gst_structure_get_name(str);
+				eDebug("AUDIO STRUCT=%s", g_type);
+				audio.type = gstCheckAudioPad(str);
+				/* defaults; replaced by tag values when present */
+				g_codec = g_strdup(g_type);
+				g_lang = g_strdup_printf ("und");
+				g_signal_emit_by_name (m_gst_playbin, "get-audio-tags", i, &tags);
+				if ( tags && gst_is_tag_list(tags) )
+				{
+					gchar *value = NULL;
+					if (gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &value))
+					{
+						g_free(g_codec); /* was leaked when the tag overrode the default */
+						g_codec = value;
+					}
+					value = NULL;
+					if (gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &value))
+					{
+						g_free(g_lang);
+						g_lang = value;
+					}
+					gst_tag_list_free(tags);
+					tags = NULL; /* was reused after free by the subtitle loop below */
+				}
+				audio.language_code = std::string(g_lang);
+				audio.codec = std::string(g_codec);
+				eDebug("eServiceMP3::audio stream=%i codec=%s language=%s", i, g_codec, g_lang);
+				m_audioStreams.push_back(audio);
+				g_free (g_lang);
+				g_free (g_codec);
+				gst_caps_unref(caps);
+			}
+
+			for (i = 0; i < n_text; i++)
+			{
+				gchar *g_lang;
+//				gchar *g_type;
+//				GstPad* pad = 0;
+//				g_signal_emit_by_name (m_gst_playbin, "get-text-pad", i, &pad);
+//				GstCaps* caps = gst_pad_get_negotiated_caps(pad);
+//				GstStructure* str = gst_caps_get_structure(caps, 0);
+//				g_type = gst_structure_get_name(str);
+//				g_signal_emit_by_name (m_gst_playbin, "get-text-tags", i, &tags);
+				subtitleStream subs;
+				subs.type = stPlainText;
+				g_lang = g_strdup_printf ("und");
+				/* NOTE(review): get-text-tags is commented out above, so tags is
+				   always NULL here and the language stays "und" until that is
+				   re-enabled */
+				if ( tags && gst_is_tag_list(tags) )
+					gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_lang);
+				subs.language_code = std::string(g_lang);
+				eDebug("eServiceMP3::subtitle stream=%i language=%s"/* type=%s*/, i, g_lang/*, g_type*/);
+				m_subtitleStreams.push_back(subs);
+				g_free (g_lang);
+//				g_free (g_type);
+			}
+			m_event((iPlayableService*)this, evUpdatedEventInfo);
+			break; /* was missing: fell through into GST_MESSAGE_ELEMENT handling */
+		}
+		case GST_MESSAGE_ELEMENT:
+		{
+			if ( gst_is_missing_plugin_message(msg) )
+			{
+				gchar *description = gst_missing_plugin_message_get_description(msg);
+				if ( description )
+				{
+					m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
+					g_free(description);
+					m_event((iPlayableService*)this, evUser+12);
+				}
+			}
+			else if (const GstStructure *msgstruct = gst_message_get_structure(msg))
+			{
+				/* custom messages posted by the dvb video sink */
+				const gchar *eventname = gst_structure_get_name(msgstruct);
+				if ( eventname )
+				{
+					if (!strcmp(eventname, "eventSizeChanged") || !strcmp(eventname, "eventSizeAvail"))
+					{
+						gst_structure_get_int (msgstruct, "aspect_ratio", &m_aspect);
+						gst_structure_get_int (msgstruct, "width", &m_width);
+						gst_structure_get_int (msgstruct, "height", &m_height);
+						if (strstr(eventname, "Changed"))
+							m_event((iPlayableService*)this, evVideoSizeChanged);
+					}
+					else if (!strcmp(eventname, "eventFrameRateChanged") || !strcmp(eventname, "eventFrameRateAvail"))
+					{
+						gst_structure_get_int (msgstruct, "frame_rate", &m_framerate);
+						if (strstr(eventname, "Changed"))
+							m_event((iPlayableService*)this, evVideoFramerateChanged);
+					}
+					else if (!strcmp(eventname, "eventProgressiveChanged") || !strcmp(eventname, "eventProgressiveAvail"))
+					{
+						gst_structure_get_int (msgstruct, "progressive", &m_progressive);
+						if (strstr(eventname, "Changed"))
+							m_event((iPlayableService*)this, evVideoProgressiveChanged);
+					}
+				}
+			}
+			break;
+		}
+		case GST_MESSAGE_BUFFERING:
+		{
+			GstBufferingMode mode;
+			gst_message_parse_buffering(msg, &(m_bufferInfo.bufferPercent));
+			gst_message_parse_buffering_stats(msg, &mode, &(m_bufferInfo.avgInRate), &(m_bufferInfo.avgOutRate), &(m_bufferInfo.bufferingLeft));
+			m_event((iPlayableService*)this, evBuffering);
+			break; /* was missing (fell through to default, which only broke) */
+		}
+		default:
+			break;
+	}
+	g_free (sourceName);
+}
+
+/* Bus sync handler: runs on the streaming thread. Only pokes the message
+   pump so the message is processed later in main-loop context (gstPoll);
+   GST_BUS_PASS leaves the message on the bus for gst_bus_pop(). */
+GstBusSyncReply eServiceMP3::gstBusSyncHandler(GstBus *bus, GstMessage *message, gpointer user_data)
+{
+ eServiceMP3 *_this = (eServiceMP3*)user_data;
+ _this->m_pump.send(1);
+ /* wake */
+ return GST_BUS_PASS;
+}
+
+/* Map the caps structure of a negotiated audio pad to our audiotype_t.
+   Anything unrecognised (or a NULL structure) yields atUnknown. */
+audiotype_t eServiceMP3::gstCheckAudioPad(GstStructure* structure)
+{
+	if (!structure)
+		return atUnknown;
+
+	if (gst_structure_has_name(structure, "audio/mpeg"))
+	{
+		gint mpegversion = 0;
+		gint layer = -1;
+		if (!gst_structure_get_int(structure, "mpegversion", &mpegversion))
+			return atUnknown;
+
+		switch (mpegversion)
+		{
+			case 1:
+				/* MPEG-1 layer III is MP3; layers I/II are plain MPEG audio */
+				gst_structure_get_int(structure, "layer", &layer);
+				return (layer == 3) ? atMP3 : atMPEG;
+			case 2:
+			case 4:
+				/* both MPEG-2 and MPEG-4 audio are treated as AAC */
+				return atAAC;
+			default:
+				return atUnknown;
+		}
+	}
+
+	if (gst_structure_has_name(structure, "audio/x-ac3") || gst_structure_has_name(structure, "audio/ac3"))
+		return atAC3;
+	if (gst_structure_has_name(structure, "audio/x-dts") || gst_structure_has_name(structure, "audio/dts"))
+		return atDTS;
+	if (gst_structure_has_name(structure, "audio/x-raw-int"))
+		return atPCM;
+
+	return atUnknown;
+}
+
+/* Message-pump callback in main-loop context. msg == 1 means "bus has
+   messages" (sent by gstBusSyncHandler); anything else means "subtitle
+   buffers pending" (sent by gstCBsubtitleAvail). */
+void eServiceMP3::gstPoll(const int &msg)
+{
+ /* ok, we have a serious problem here. gstBusSyncHandler sends
+ us the wakup signal, but likely before it was posted.
+ the usleep, an EVIL HACK (DON'T DO THAT!!!) works around this.
+
+ I need to understand the API a bit more to make this work
+ proplerly. */
+ if (msg == 1)
+ {
+ GstBus *bus = gst_pipeline_get_bus (GST_PIPELINE (m_gst_playbin));
+ GstMessage *message;
+ usleep(1);
+ /* drain every queued message; gstBusCall dispatches each one */
+ while ((message = gst_bus_pop (bus)))
+ {
+ gstBusCall(bus, message);
+ gst_message_unref (message);
+ }
+ }
+ else
+ pullSubtitle();
+}
+
+/* register the MP3/media service factory with the auto-init machinery */
+eAutoInitPtr<eServiceFactoryMP3> init_eServiceFactoryMP3(eAutoInitNumbers::service+1, "eServiceFactoryMP3");
+
+/* appsink "new-buffer" callback (streaming thread): count the pending
+   subtitle buffer under the lock and wake the main loop (pump value 2
+   routes gstPoll to pullSubtitle). */
+void eServiceMP3::gstCBsubtitleAvail(GstElement *appsink, gpointer user_data)
+{
+ eServiceMP3 *_this = (eServiceMP3*)user_data;
+ eSingleLocker l(_this->m_subs_to_pull_lock);
+ ++_this->m_subs_to_pull;
+ _this->m_pump.send(2);
+}
+
+/* Drain pending subtitle buffers from the text appsink into
+   m_subtitle_pages (keeping at most 2 queued), then hand off to
+   pushSubtitles for timing. Runs in main-loop context. */
+void eServiceMP3::pullSubtitle()
+{
+ GstElement *sink;
+ g_object_get (G_OBJECT (m_gst_playbin), "text-sink", &sink, NULL);
+ if (sink)
+ {
+ while (m_subs_to_pull && m_subtitle_pages.size() < 2)
+ {
+ GstBuffer *buffer;
+ {
+ /* decrement and pull under the lock so the streaming-thread
+    callback's counter stays consistent with the appsink queue */
+ eSingleLocker l(m_subs_to_pull_lock);
+ --m_subs_to_pull;
+ g_signal_emit_by_name (sink, "pull-buffer", &buffer);
+ }
+ if (buffer)
+ {
+ gint64 buf_pos = GST_BUFFER_TIMESTAMP(buffer);
+ gint64 duration_ns = GST_BUFFER_DURATION(buffer);
+ size_t len = GST_BUFFER_SIZE(buffer);
+ /* NOTE(review): VLA (GCC extension) — copy buffer to a NUL-terminated string */
+ unsigned char line[len+1];
+ memcpy(line, GST_BUFFER_DATA(buffer), len);
+ line[len] = 0;
+ eDebug("got new subtitle @ buf_pos = %lld ns (in pts=%lld): '%s' ", buf_pos, buf_pos/11111, line);
+ ePangoSubtitlePage page;
+ gRGB rgbcol(0xD0,0xD0,0xD0);
+ page.m_elements.push_back(ePangoSubtitlePageElement(rgbcol, (const char*)line));
+ /* /11111 approximates ns -> 90kHz PTS (1e9/90000 = 11111.1) */
+ page.show_pts = buf_pos / 11111L;
+ page.m_timeout = duration_ns / 1000000;
+ m_subtitle_pages.push_back(page);
+ pushSubtitles();
+ gst_buffer_unref(buffer);
+ }
+ }
+ gst_object_unref(sink);
+ }
+ else
+ eDebug("no subtitle sink!");
+}
+
+/* Display queued subtitle pages at the right time relative to the decoder
+   PTS: drop pages more than 100ms late, re-arm the sync timer for pages
+   more than 20ms early, and show everything in between immediately. */
+void eServiceMP3::pushSubtitles()
+{
+ ePangoSubtitlePage page;
+ pts_t running_pts;
+ while ( !m_subtitle_pages.empty() )
+ {
+ getPlayPosition(running_pts);
+ page = m_subtitle_pages.front();
+ /* PTS ticks are 90kHz, so /90 converts the delta to milliseconds */
+ gint64 diff_ms = ( page.show_pts - running_pts ) / 90;
+ eDebug("eServiceMP3::pushSubtitles show_pts = %lld running_pts = %lld diff = %lld", page.show_pts, running_pts, diff_ms);
+ if (diff_ms < -100)
+ {
+ /* page looks late — but if decoder and pipeline clocks disagree
+    wildly, the decoder probably hasn't started; retry in 1s */
+ GstFormat fmt = GST_FORMAT_TIME;
+ gint64 now;
+ if (gst_element_query_position(m_gst_playbin, &fmt, &now) != -1)
+ {
+ /* ns -> 90kHz PTS (approximation, see pullSubtitle) */
+ now /= 11111;
+ diff_ms = abs((now - running_pts) / 90);
+ eDebug("diff < -100ms check decoder/pipeline diff: decoder: %lld, pipeline: %lld, diff: %lld", running_pts, now, diff_ms);
+ if (diff_ms > 100000)
+ {
+ eDebug("high decoder/pipeline difference.. assume decoder has now started yet.. check again in 1sec");
+ m_subtitle_sync_timer->start(1000, true);
+ break;
+ }
+ }
+ else
+ eDebug("query position for decoder/pipeline check failed!");
+ eDebug("subtitle to late... drop");
+ m_subtitle_pages.pop_front();
+ }
+ else if ( diff_ms > 20 )
+ {
+// eDebug("start recheck timer");
+ /* too early: re-check after the gap, capped at 1s */
+ m_subtitle_sync_timer->start(diff_ms > 1000 ? 1000 : diff_ms, true);
+ break;
+ }
+ else // immediate show
+ {
+ if (m_subtitle_widget)
+ m_subtitle_widget->setPage(page);
+ m_subtitle_pages.pop_front();
+ }
+ }
+ /* queue drained — fetch more buffers from the appsink if available */
+ if (m_subtitle_pages.empty())
+ pullSubtitle();
+}
+
+/* Enable subtitle display. @tuple is (2, subtitle_stream_index, subtitle_type)
+   from Python; switches playbin's current-text stream and (re)creates the
+   subtitle widget on @parent. Returns 0 on success, -1 on a malformed tuple. */
+RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
+{
+	ePyObject entry;
+	int tuplesize;
+	int pid, type;
+	gint text_pid = 0;
+
+	/* check the type before any PyTuple_* accessor: PyTuple_Size on a
+	   non-tuple raises a python error (the old code called it first) */
+	if (!PyTuple_Check(tuple))
+		goto error_out;
+	tuplesize = PyTuple_Size(tuple);
+	/* items 1 and 2 are read below with unchecked PyTuple_GET_ITEM, so at
+	   least 3 entries are required (old check was tuplesize < 1) */
+	if (tuplesize < 3)
+		goto error_out;
+	entry = PyTuple_GET_ITEM(tuple, 1);
+	if (!PyInt_Check(entry))
+		goto error_out;
+	pid = PyInt_AsLong(entry);
+	entry = PyTuple_GET_ITEM(tuple, 2);
+	if (!PyInt_Check(entry))
+		goto error_out;
+	type = PyInt_AsLong(entry);
+
+	if (m_currentSubtitleStream != pid)
+	{
+		/* switching streams: reset the pull counter and drop queued pages
+		   under the lock so the appsink callback stays consistent */
+		eSingleLocker l(m_subs_to_pull_lock);
+		g_object_set (G_OBJECT (m_gst_playbin), "current-text", pid, NULL);
+		m_currentSubtitleStream = pid;
+		m_subs_to_pull = 0;
+		m_subtitle_pages.clear();
+	}
+
+	delete m_subtitle_widget; /* was "= 0", leaking any previous widget */
+	m_subtitle_widget = new eSubtitleWidget(parent);
+	m_subtitle_widget->resize(parent->size()); /* full size */
+
+	g_object_get (G_OBJECT (m_gst_playbin), "current-text", &text_pid, NULL);
+
+	eDebug ("eServiceMP3::switched to subtitle stream %i", text_pid);
+
+	return 0;
+
+error_out:
+	eDebug("eServiceMP3::enableSubtitles needs a tuple as 2nd argument!\n"
+		"for gst subtitles (2, subtitle_stream_count, subtitle_type)");
+	return -1;
+}
+
+/* Tear down subtitle display: drop queued pages and destroy the widget.
+   Always succeeds. */
+RESULT eServiceMP3::disableSubtitles(eWidget *parent)
+{
+ eDebug("eServiceMP3::disableSubtitles");
+ m_subtitle_pages.clear();
+ delete m_subtitle_widget;
+ m_subtitle_widget = 0;
+ return 0;
+}
+
+/* No cached subtitle selection is stored for GStreamer services. */
+PyObject *eServiceMP3::getCachedSubtitle()
+{
+// eDebug("eServiceMP3::getCachedSubtitle");
+ Py_RETURN_NONE;
+}
+
+/* Build the python list of subtitle stream tuples:
+   (2, per-type index, type, 0, language_code) for each detected stream. */
+PyObject *eServiceMP3::getSubtitleList()
+{
+	eDebug("eServiceMP3::getSubtitleList");
+
+	ePyObject l = PyList_New(0);
+	/* per-type running index, grown on demand. The old fixed array was
+	   sized sizeof(subtype_t) — the enum's byte size, not its enumerator
+	   count — and could overflow for larger type values. */
+	std::vector<int> stream_count;
+
+	for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
+	{
+		subtype_t type = IterSubtitleStream->type;
+		if ((size_t)type >= stream_count.size())
+			stream_count.resize((size_t)type + 1, 0);
+		ePyObject tuple = PyTuple_New(5);
+		PyTuple_SET_ITEM(tuple, 0, PyInt_FromLong(2));
+		PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(stream_count[type]));
+		PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(int(type)));
+		PyTuple_SET_ITEM(tuple, 3, PyInt_FromLong(0));
+		PyTuple_SET_ITEM(tuple, 4, PyString_FromString((IterSubtitleStream->language_code).c_str()));
+		PyList_Append(l, tuple);
+		Py_DECREF(tuple);
+		stream_count[type]++;
+	}
+	return l;
+}
+
+/* Expose the iStreamedService interface (buffer charge / size queries). */
+RESULT eServiceMP3::streamed(ePtr<iStreamedService> &ptr)
+{
+ ptr = this;
+ return 0;
+}
+
+/* Return the buffering state as a 5-tuple:
+   (percent, avg input rate, avg output rate, buffering left, buffer size). */
+PyObject *eServiceMP3::getBufferCharge()
+{
+	const long values[5] =
+	{
+		m_bufferInfo.bufferPercent,
+		m_bufferInfo.avgInRate,
+		m_bufferInfo.avgOutRate,
+		m_bufferInfo.bufferingLeft,
+		m_buffer_size
+	};
+	ePyObject tuple = PyTuple_New(5);
+	for (int idx = 0; idx < 5; ++idx)
+		PyTuple_SET_ITEM(tuple, idx, PyInt_FromLong(values[idx]));
+	return tuple;
+}
+
+/* Store and apply the playbin buffer size (bytes). Always returns 0. */
+int eServiceMP3::setBufferSize(int size)
+{
+ m_buffer_size = size;
+ g_object_set (G_OBJECT (m_gst_playbin), "buffer-size", m_buffer_size, NULL);
+ return 0;
+}
+
+/* Current per-service AC3 delay (ms), as set via setAC3Delay. */
+int eServiceMP3::getAC3Delay()
+{
+ return ac3_delay;
+}
+
+/* Current per-service PCM delay (ms), as set via setPCMDelay. */
+int eServiceMP3::getPCMDelay()
+{
+ return pcm_delay;
+}
+
+/* Set the AC3 delay (ms). Stored immediately; applied to the hardware
+   decoder only while the pipeline is running. The configured general AC3
+   delay is added on top, but only when a video sink exists (audio-only
+   playback gets no extra delay). */
+void eServiceMP3::setAC3Delay(int delay)
+{
+ ac3_delay = delay;
+ if (!m_gst_playbin || m_state != stRunning)
+ return;
+ else
+ {
+ GstElement *sink;
+ int config_delay_int = delay;
+ g_object_get (G_OBJECT (m_gst_playbin), "video-sink", &sink, NULL);
+
+ if (sink)
+ {
+ std::string config_delay;
+ if(ePythonConfigQuery::getConfigValue("config.av.generalAC3delay", config_delay) == 0)
+ config_delay_int += atoi(config_delay.c_str());
+ gst_object_unref(sink);
+ }
+ else
+ {
+ eDebug("dont apply ac3 delay when no video is running!");
+ config_delay_int = 0;
+ }
+
+ g_object_get (G_OBJECT (m_gst_playbin), "audio-sink", &sink, NULL);
+
+ if (sink)
+ {
+ /* only the dvb hardware sink supports an AC3 delay */
+ gchar *name = gst_element_get_name(sink);
+ if (strstr(name, "dvbaudiosink"))
+ eTSMPEGDecoder::setHwAC3Delay(config_delay_int);
+ g_free(name);
+ gst_object_unref(sink);
+ }
+ }
+}
+
+/* Set the PCM delay (ms). Mirrors setAC3Delay: stored immediately, applied
+   only while running, general PCM delay added when video is present. For
+   non-dvb audio sinks the delay is applied via playbin's ts-offset instead
+   of the hardware decoder. */
+void eServiceMP3::setPCMDelay(int delay)
+{
+ pcm_delay = delay;
+ if (!m_gst_playbin || m_state != stRunning)
+ return;
+ else
+ {
+ GstElement *sink;
+ int config_delay_int = delay;
+ g_object_get (G_OBJECT (m_gst_playbin), "video-sink", &sink, NULL);
+
+ if (sink)
+ {
+ std::string config_delay;
+ if(ePythonConfigQuery::getConfigValue("config.av.generalPCMdelay", config_delay) == 0)
+ config_delay_int += atoi(config_delay.c_str());
+ gst_object_unref(sink);
+ }
+ else
+ {
+ eDebug("dont apply pcm delay when no video is running!");
+ config_delay_int = 0;
+ }
+
+ g_object_get (G_OBJECT (m_gst_playbin), "audio-sink", &sink, NULL);
+
+ if (sink)
+ {
+ gchar *name = gst_element_get_name(sink);
+ if (strstr(name, "dvbaudiosink"))
+ eTSMPEGDecoder::setHwPCMDelay(config_delay_int);
+ else
+ {
+ // this is realy untested..and not used yet
+ gint64 offset = config_delay_int;
+ offset *= 1000000; // milli to nano
+ g_object_set (G_OBJECT (m_gst_playbin), "ts-offset", offset, NULL);
+ }
+ g_free(name);
+ gst_object_unref(sink);
+ }
+ }
+}