+ /* Patch hunk (leading '+' diff markers): builds the GStreamer playback
+    pipeline for the detected media type.  This is the interior of a larger
+    function that begins before this hunk — 'source', 'decoder', 'conv',
+    'flt', 'sink', 'audio', 'video', the queue_* elements and the is_* flags
+    are all set up earlier, outside this view. */
+ /* Pick a container demuxer matching the probed stream type. */
+ if (is_mpeg_ps)
+ videodemux = gst_element_factory_make("flupsdemux", "videodemux");
+ else if (is_mpeg_ts)
+ videodemux = gst_element_factory_make("flutsdemux", "videodemux");
+ else if (is_matroska)
+ videodemux = gst_element_factory_make("matroskademux", "videodemux");
+ else if (is_avi)
+ videodemux = gst_element_factory_make("avidemux", "videodemux");
+
+ /* NOTE(review): this fallback fires for ALL failed factories above, but
+    'mpegdemux' can only demux MPEG PS — if 'matroskademux' or 'avidemux'
+    was the one that failed, this substitute cannot work, and the log text
+    ("fluendo mpegdemux not available") is then misleading.  Also the
+    trailing '\n' is inconsistent with every other eDebug call here, which
+    suggests eDebug appends the newline itself — confirm. */
+ if (!videodemux)
+ {
+ eDebug("fluendo mpegdemux not available, falling back to mpegdemux\n");
+ videodemux = gst_element_factory_make("mpegdemux", "videodemux");
+ }
+
+ eDebug("audio: %p, queue_audio %p, video %p, queue_video %p, videodemux %p", audio, queue_audio, video, queue_video, videodemux);
+ if (audio && queue_audio && video && queue_video && videodemux)
+ {
+ /* Cap the audio queue by bytes only (256 KiB); a buffer-count or time
+    limit of 0 means "unlimited" for GStreamer queue elements. */
+ g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 256*1024, NULL);
+ g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 0, NULL);
+ g_object_set (G_OBJECT (queue_audio), "max-size-time", (guint64)0, NULL);
+ g_object_set (G_OBJECT (queue_video), "max-size-buffers", 0, NULL);
+ /* Video gets a larger byte budget (2 MiB) than audio. */
+ g_object_set (G_OBJECT (queue_video), "max-size-bytes", 2*1024*1024, NULL);
+ g_object_set (G_OBJECT (queue_video), "max-size-time", (guint64)0, NULL);
+ all_ok = 1;
+ }
+ /* Closes a scope opened before this hunk (element-creation block). */
+ }
+
+ if (m_gst_pipeline && all_ok)
+ {
+ /* Bus messages are routed synchronously to gstBusSyncHandler with this
+    service instance as user data. */
+ gst_bus_set_sync_handler(gst_pipeline_get_bus (GST_PIPELINE (m_gst_pipeline)), gstBusSyncHandler, this);
+
+ if (is_AudioCD)
+ {
+ /* Audio CD: straight chain source -> queue -> converter -> sink. */
+ queue_audio = gst_element_factory_make("queue", "queue_audio");
+ /* "preroll-queue-len" is presumably a property of the project's audio
+    sink element — TODO confirm against the sink implementation. */
+ g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
+ gst_bin_add_many (GST_BIN (m_gst_pipeline), source, queue_audio, conv, sink, NULL);
+ gst_element_link_many(source, queue_audio, conv, sink, NULL);
+ }
+ else if (is_audio)
+ {
+ queue_audio = gst_element_factory_make("queue", "queue_audio");
+
+ if (!is_mp3)
+ {
+ /* decodebin has dynamic pads. When they get created, we connect them to the audio bin */
+ g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
+ g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
+ g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
+ }
+
+ /* gst_bin will take the 'floating references' */
+ gst_bin_add_many (GST_BIN (m_gst_pipeline),
+ source, queue_audio, decoder, NULL);
+
+ if (filter)
+ {
+ /* id3demux also has dynamic pads, which need to be connected to the decoder (this is done in the 'gstCBfilterPadAdded' CB) */
+ gst_bin_add(GST_BIN(m_gst_pipeline), filter);
+ gst_element_link(source, filter);
+ g_signal_connect (filter, "pad-added", G_CALLBACK(gstCBfilterPadAdded), this);
+ } else
+ /* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
+ gst_element_link_many(source, queue_audio, decoder, NULL);
+
+ /* create audio bin with the audioconverter, the capsfilter and the audiosink */
+ audio = gst_bin_new ("audiobin");
+
+ /* Expose conv's sink pad as a ghost pad so the bin can be linked to. */
+ GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
+ gst_bin_add_many(GST_BIN(audio), conv, flt, sink, (char*)0);
+ gst_element_link_many(conv, flt, sink, (char*)0);
+ gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
+ gst_object_unref(audiopad);
+ gst_bin_add (GST_BIN(m_gst_pipeline), audio);
+ /* in mad's case, we can directly connect the decoder to the audiobin. otherwise, we do this in gstCBnewPad */
+ if (is_mp3)
+ gst_element_link(decoder, audio);
+ /* Register one (default-constructed) audio stream entry for this track. */
+ audioStream audioStreamElem;
+ m_audioStreams.push_back(audioStreamElem);
+ } else /* is_video */
+ {
+ /* Derive "<basename>.srt" from the media filename by replacing the last
+    three characters with "srt".
+    NOTE(review): if strlen(filename) < 3 the size_t expression
+    strlen(filename)-3 wraps around, making strncpy copy a huge count —
+    out-of-bounds write / undefined behavior.  This also hard-assumes a
+    3-character extension ("xyz.avi" works, "xyz.mpeg" would not). */
+ char srt_filename[strlen(filename)+1];
+ strncpy(srt_filename,filename,strlen(filename)-3);
+ srt_filename[strlen(filename)-3]='\0';
+ strcat(srt_filename, "srt");
+ struct stat buffer;
+ if (stat(srt_filename, &buffer) == 0)
+ {
+ eDebug("subtitle file found: %s",srt_filename);
+ GstElement *subsource;
+ subsource = gst_element_factory_make ("filesrc", "srt_source");
+ /* NOTE(review): "location" is set to 'filename' (the media file), not
+    'srt_filename' which was just probed with stat() — this looks like a
+    bug: subparse would be fed the video file instead of the .srt. */
+ g_object_set (G_OBJECT (subsource), "location", filename, NULL);
+ GstElement *parser = gst_element_factory_make("subparse", "srt_parse");
+ eDebug ("subparse = %p", parser);
+ /* NOTE(review): this local 'sink' shadows the outer audio sink variable
+    for the rest of this scope — confirm that is intentional. */
+ GstElement *sink = gst_element_factory_make("fakesink", "srt_sink");
+ eDebug ("fakesink = %p", sink);
+ /* fakesink with signal-handoffs delivers each parsed subtitle buffer
+    to gstCBsubtitleAvail via the "handoff" signal. */
+ g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
+ gst_bin_add_many(GST_BIN (m_gst_pipeline), subsource, parser, sink, NULL);
+ gboolean res = gst_element_link(subsource, parser);
+ eDebug ("parser link = %d", res);
+ res = gst_element_link(parser, sink);
+ eDebug ("sink link = %d", res);
+ g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), this);
+ subtitleStream subs;
+ subs.element = sink;
+ m_subtitleStreams.push_back(subs);
+ }
+ else
+ eDebug("subtitle file not found: %s",srt_filename);
+
+ /* Assemble the video pipeline: demuxer feeds the queues, whose outputs
+    go to the audio/video sinks; demuxer pads appear dynamically. */
+ gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, NULL);
+ /* Optional input-selector in front of the audio queue for multi-track
+    audio; playback still works without it if the factory is missing. */
+ switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
+ if (switch_audio)
+ {
+ g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
+ gst_bin_add(GST_BIN(m_gst_pipeline), switch_audio);
+ gst_element_link(switch_audio, queue_audio);
+ }
+ gst_element_link(source, videodemux);
+ gst_element_link(queue_audio, audio);
+ gst_element_link(queue_video, video);
+ /* Demuxer pads are created on the fly; gstCBpadAdded links each new pad
+    to the matching queue/selector. */
+ g_signal_connect(videodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+ }