+
+ /* FIXME: currently, decodebin isn't possible for
+ video streams. in that case, make a manual pipeline. */
+
+ /* Pick the container/audio type from the filename extension.
+ If there is no '.', ext aliases the whole filename, so the
+ strcasecmp() tests below simply won't match. */
+ const char *ext = strrchr(filename, '.');
+ if (!ext)
+ ext = filename;
+
+ /* NOTE(review): is_streaming is never explicitly initialized here —
+ presumably sourceStream's constructor zeroes it; confirm, since it is
+ read at the eDebug below and when choosing neonhttpsrc. */
+ sourceStream sourceinfo;
+ sourceinfo.is_video = FALSE;
+ sourceinfo.audiotype = atUnknown;
+ /* NOTE(review): "(A && B && C && ...) == 0" is true iff at least one
+ strcasecmp() returned 0, i.e. the extension matches ANY of the listed
+ ones. It works, but reads backwards — the idiomatic form would be
+ an ||-chain of "strcasecmp(...) == 0" terms. */
+ if ( (strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat") ) == 0 )
+ {
+ sourceinfo.containertype = ctMPEGPS;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".ts") == 0 )
+ {
+ sourceinfo.containertype = ctMPEGTS;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".mkv") == 0 )
+ {
+ sourceinfo.containertype = ctMKV;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".avi") == 0 || strcasecmp(ext, ".divx") == 0)
+ {
+ sourceinfo.containertype = ctAVI;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".mp4") == 0 )
+ {
+ sourceinfo.containertype = ctMP4;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".m4a") == 0 )
+ {
+ sourceinfo.containertype = ctMP4;
+ sourceinfo.audiotype = atAAC;
+ }
+ else if ( strcasecmp(ext, ".mp3") == 0 )
+ sourceinfo.audiotype = atMP3;
+ /* NOTE(review): here "(A || B || C) == 0" is true only when ALL three
+ comparisons return 0 — a logical AND: path starts with "/autofs/",
+ the 13-chars-from-the-end slice starts with "/track-", and the
+ extension is ".wav" (i.e. an automounted audio-CD track like
+ /autofs/.../track-NN.wav). Also assumes strlen(filename) >= 13,
+ otherwise filename+strlen(filename)-13 underruns — TODO confirm
+ callers guarantee this. */
+ else if ( (strncmp(filename, "/autofs/", 8) || strncmp(filename+strlen(filename)-13, "/track-", 7) || strcasecmp(ext, ".wav")) == 0 )
+ sourceinfo.containertype = ctCDA;
+ /* .dat matched the MPEG-PS group above; this second check overrides
+ the containertype to VCD for .dat files. */
+ if ( strcasecmp(ext, ".dat") == 0 )
+ {
+ sourceinfo.containertype = ctVCD;
+ sourceinfo.is_video = TRUE;
+ }
+ if ( (strncmp(filename, "http://", 7)) == 0 )
+ sourceinfo.is_streaming = TRUE;
+
+ eDebug("filename=%s, containertype=%d, is_video=%d, is_streaming=%d", filename, sourceinfo.containertype, sourceinfo.is_video, sourceinfo.is_streaming);
+
+ /* Set to 1 only when every element needed by the chosen branch was
+ created; gates the pipeline assembly below. */
+ int all_ok = 0;
+
+ m_gst_pipeline = gst_pipeline_new ("mediaplayer");
+ if (!m_gst_pipeline)
+ m_error_message = "failed to create GStreamer pipeline!\n";
+
+ /* --- source element selection: http stream, audio CD, VCD, or plain file --- */
+ if ( sourceinfo.is_streaming )
+ {
+ eDebug("play webradio!");
+ source = gst_element_factory_make ("neonhttpsrc", "http-source");
+ if (source)
+ {
+ g_object_set (G_OBJECT (source), "location", filename, NULL);
+ g_object_set (G_OBJECT (source), "automatic-redirect", TRUE, NULL);
+ }
+ else
+ m_error_message = "GStreamer plugin neonhttpsrc not available!\n";
+ }
+ else if ( sourceinfo.containertype == ctCDA )
+ {
+ source = gst_element_factory_make ("cdiocddasrc", "cda-source");
+ if (source)
+ {
+ g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
+ /* NOTE(review): magic offset 18 — presumably skips a fixed
+ "/autofs/..." prefix so atoi lands on the track number in
+ ".../track-NN.wav"; verify against the actual mount path. */
+ int track = atoi(filename+18);
+ eDebug("play audio CD track #%i",track);
+ if (track > 0)
+ g_object_set (G_OBJECT (source), "track", track, NULL);
+ }
+ }
+ else if ( sourceinfo.containertype == ctVCD )
+ {
+ /* Probe: if the .dat cannot be read as a regular file, assume it is
+ a raw VCD device and use vcdsrc; otherwise fall through to filesrc
+ + cdxaparse below ("fake" VCD image on a filesystem). */
+ int fd = open(filename,O_RDONLY);
+ /* NOTE(review): fd is not checked; read(-1, ...) just returns -1
+ with EBADF, which this code treats as the "real VCD" signal —
+ works, but any read failure (permissions, I/O error) is also
+ classified as a real VCD. 128 KiB on the stack is also large. */
+ char tmp[128*1024];
+ int ret = read(fd, tmp, 128*1024);
+ close(fd);
+ if ( ret == -1 ) // this is a "REAL" VCD
+ {
+ source = gst_element_factory_make ("vcdsrc", "vcd-source");
+ if (source)
+ {
+ g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
+ eDebug("servicemp3: this is a 'REAL' video cd... we use vcdsrc !");
+ }
+ }
+ }
+ /* Fallback for everything non-streaming that didn't get a source above
+ (plain files, CDA/VCD factory failures). */
+ if ( !source && !sourceinfo.is_streaming )
+ {
+ source = gst_element_factory_make ("filesrc", "file-source");
+ if (source)
+ g_object_set (G_OBJECT (source), "location", filename, NULL);
+ else
+ m_error_message = "GStreamer can't open filesrc " + (std::string)filename + "!\n";
+ }
+ /* --- element creation for the video vs. audio pipeline variants --- */
+ if ( sourceinfo.is_video )
+ {
+ /* filesrc -> mpegdemux -> | queue_audio -> dvbaudiosink
+ | queue_video -> dvbvideosink */
+
+ audio = gst_element_factory_make("dvbaudiosink", "audiosink");
+ if (!audio)
+ m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+
+ video = gst_element_factory_make("dvbvideosink", "videosink");
+ if (!video)
+ m_error_message += "failed to create Gstreamer element dvbvideosink\n";
+
+ queue_audio = gst_element_factory_make("queue", "queue_audio");
+ queue_video = gst_element_factory_make("queue", "queue_video");
+
+ /* Map containertype to the matching demuxer plugin name. */
+ std::string demux_type;
+ switch (sourceinfo.containertype)
+ {
+ case ctMPEGTS:
+ demux_type = "mpegtsdemux";
+ break;
+ case ctMPEGPS:
+ case ctVCD:
+ demux_type = "mpegpsdemux";
+ break;
+ case ctMKV:
+ demux_type = "matroskademux";
+ break;
+ case ctAVI:
+ demux_type = "avidemux";
+ break;
+ case ctMP4:
+ demux_type = "qtdemux";
+ break;
+ default:
+ /* NOTE(review): unhandled containertype leaves demux_type
+ empty; gst_element_factory_make("") then fails and the
+ error path below reports it. */
+ break;
+ }
+ videodemux = gst_element_factory_make(demux_type.c_str(), "videodemux");
+ if (!videodemux)
+ m_error_message = "GStreamer plugin " + demux_type + " not available!\n";
+
+ /* input-selector multiplexes the demuxer's audio pads so the audio
+ track can be switched at runtime. */
+ switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
+ if (!switch_audio)
+ m_error_message = "GStreamer plugin input-selector not available!\n";
+
+ if (audio && queue_audio && video && queue_video && videodemux && switch_audio)
+ {
+ /* Limit queues by bytes only (256 KiB audio / 2 MiB video);
+ buffer- and time-limits are disabled (0). */
+ g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 256*1024, NULL);
+ g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 0, NULL);
+ g_object_set (G_OBJECT (queue_audio), "max-size-time", (guint64)0, NULL);
+ g_object_set (G_OBJECT (queue_video), "max-size-buffers", 0, NULL);
+ g_object_set (G_OBJECT (queue_video), "max-size-bytes", 2*1024*1024, NULL);
+ g_object_set (G_OBJECT (queue_video), "max-size-time", (guint64)0, NULL);
+ g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
+ all_ok = 1;
+ }
+ } else /* is audio */
+ {
+ /* Audio-only: optional container demuxer (only MP4 needs one). */
+ std::string demux_type;
+ switch ( sourceinfo.containertype )
+ {
+ case ctMP4:
+ demux_type = "qtdemux";
+ break;
+ default:
+ break;
+ }
+ if ( demux_type.length() )
+ {
+ audiodemux = gst_element_factory_make(demux_type.c_str(), "audiodemux");
+ if (!audiodemux)
+ m_error_message = "GStreamer plugin " + demux_type + " not available!\n";
+ }
+ /* Per-codec element creation; each case sets all_ok only when every
+ element it needs exists, otherwise appends to m_error_message. */
+ switch ( sourceinfo.audiotype )
+ {
+ case atMP3:
+ {
+ /* MP3: strip ID3 tags, parse frames, feed dvbaudiosink. */
+ id3demux = gst_element_factory_make("id3demux", "id3demux");
+ if ( !id3demux )
+ {
+ m_error_message += "failed to create Gstreamer element id3demux\n";
+ break;
+ }
+ /* NOTE(review): the parser element is named "audiosink" and the
+ real sink "audiosink2" — looks accidental; element names must
+ be unique per bin, which this satisfies, but it is confusing. */
+ parser = gst_element_factory_make("mp3parse", "audiosink");
+ if ( !parser )
+ {
+ m_error_message += "failed to create Gstreamer element mp3parse\n";
+ break;
+ }
+ sink = gst_element_factory_make("dvbaudiosink", "audiosink2");
+ if ( !sink )
+ m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+ else
+ all_ok = 1;
+ break;
+ }
+ case atAAC:
+ {
+ /* AAC is only supported inside an MP4 container (qtdemux). */
+ if ( !audiodemux )
+ {
+ m_error_message += "cannot parse raw AAC audio\n";
+ break;
+ }
+ sink = gst_element_factory_make("dvbaudiosink", "audiosink");
+ if (!sink)
+ m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+ else
+ all_ok = 1;
+ break;
+ }
+ case atAC3:
+ {
+ /* Same constraint as AAC: container demuxer required. */
+ if ( !audiodemux )
+ {
+ m_error_message += "cannot parse raw AC3 audio\n";
+ break;
+ }
+ sink = gst_element_factory_make("dvbaudiosink", "audiosink");
+ if ( !sink )
+ m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+ else
+ all_ok = 1;
+ break;
+ }
+ default:
+ { /* filesrc -> decodebin -> audioconvert -> capsfilter -> alsasink */
+ decoder = gst_element_factory_make ("decodebin", "decoder");
+ if (!decoder)
+ m_error_message += "failed to create Gstreamer element decodebin\n";
+
+ conv = gst_element_factory_make ("audioconvert", "converter");
+ if (!conv)
+ m_error_message += "failed to create Gstreamer element audioconvert\n";
+
+ flt = gst_element_factory_make ("capsfilter", "flt");
+ if (!flt)
+ m_error_message += "failed to create Gstreamer element capsfilter\n";
+
+ /* for some reasons, we need to set the sample format to depth/width=16, because auto negotiation doesn't work. */
+ /* endianness, however, is not required to be set anymore. */
+ if (flt)
+ {
+ /* audio/x-raw-int caps name => GStreamer 0.10 API. */
+ GstCaps *caps = gst_caps_new_simple("audio/x-raw-int", /* "endianness", G_TYPE_INT, 4321, */ "depth", G_TYPE_INT, 16, "width", G_TYPE_INT, 16, /*"channels", G_TYPE_INT, 2, */NULL);
+ g_object_set (G_OBJECT (flt), "caps", caps, NULL);
+ gst_caps_unref(caps);
+ }
+
+ sink = gst_element_factory_make ("alsasink", "alsa-output");
+ if (!sink)
+ m_error_message += "failed to create Gstreamer element alsasink\n";
+
+ /* NOTE(review): flt is deliberately optional here — pipeline is
+ considered ok without the capsfilter. Confirm intended. */
+ if (source && decoder && conv && sink)
+ all_ok = 1;
+ break;
+ }
+ }
+
+ }
+ /* --- pipeline assembly: add elements to the bin and link them --- */
+ if (m_gst_pipeline && all_ok)
+ {
+ gst_bus_set_sync_handler(gst_pipeline_get_bus (GST_PIPELINE (m_gst_pipeline)), gstBusSyncHandler, this);
+
+ if ( sourceinfo.containertype == ctCDA )
+ {
+ queue_audio = gst_element_factory_make("queue", "queue_audio");
+ g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
+ gst_bin_add_many (GST_BIN (m_gst_pipeline), source, queue_audio, conv, sink, NULL);
+ gst_element_link_many(source, queue_audio, conv, sink, NULL);
+ }
+ else if ( sourceinfo.is_video )
+ {
+ /* Build "<basename>.srt" by replacing the 3-char extension;
+ buffer is sized strlen(filename)+1, exactly enough for
+ (len-3) copied chars + "srt" + NUL. Assumes the extension is
+ exactly 3 characters (".mpeg" would yield "xx.m" + "srt"). */
+ char srt_filename[strlen(filename)+1];
+ strncpy(srt_filename,filename,strlen(filename)-3);
+ srt_filename[strlen(filename)-3]='\0';
+ strcat(srt_filename, "srt");
+ struct stat buffer;
+ if (stat(srt_filename, &buffer) == 0)
+ {
+ eDebug("subtitle file found: %s",srt_filename);
+ /* NOTE(review): subsource from factory_make is not
+ NULL-checked before g_object_set — filesrc is almost
+ always present, but a missing plugin would crash here. */
+ GstElement *subsource = gst_element_factory_make ("filesrc", "srt_source");
+ g_object_set (G_OBJECT (subsource), "location", srt_filename, NULL);
+ gst_bin_add(GST_BIN (m_gst_pipeline), subsource);
+ GstPad *switchpad = gstCreateSubtitleSink(this, stSRT);
+ gst_pad_link(gst_element_get_pad (subsource, "src"), switchpad);
+ subtitleStream subs;
+ subs.pad = switchpad;
+ subs.type = stSRT;
+ subs.language_code = std::string("und");
+ m_subtitleStreams.push_back(subs);
+ }
+ gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, switch_audio, NULL);
+
+ /* "Fake" VCD (readable .dat image): insert cdxaparse between the
+ filesrc and the demuxer to unwrap the CD-XA sectors. */
+ if ( sourceinfo.containertype == ctVCD && gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"file-source") )
+ {
+ eDebug("servicemp3: this is a fake video cd... we use filesrc ! cdxaparse !");
+ GstElement *cdxaparse = gst_element_factory_make("cdxaparse", "cdxaparse");
+ gst_bin_add(GST_BIN(m_gst_pipeline), cdxaparse);
+ gst_element_link(source, cdxaparse);
+ gst_element_link(cdxaparse, videodemux);
+ }
+ else
+ gst_element_link(source, videodemux);
+
+ /* Demuxer pads appear dynamically; gstCBpadAdded links them to
+ switch_audio / queue_video at runtime. */
+ gst_element_link(switch_audio, queue_audio);
+ gst_element_link(queue_audio, audio);
+ gst_element_link(queue_video, video);
+ g_signal_connect(videodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+
+ } else /* is audio*/
+ {
+ if ( decoder )
+ {
+ queue_audio = gst_element_factory_make("queue", "queue_audio");
+
+ g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
+ g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
+
+ g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
+
+ /* gst_bin will take the 'floating references' */
+ gst_bin_add_many (GST_BIN (m_gst_pipeline),
+ source, queue_audio, decoder, NULL);
+
+ /* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
+ gst_element_link_many(source, queue_audio, decoder, NULL);
+
+ /* create audio bin with the audioconverter, the capsfilter and the audiosink */
+ audio = gst_bin_new ("audiobin");
+
+ /* Expose conv's sink pad as the bin's ghost "sink" pad so
+ decodebin's decoded pad can be linked to the whole bin. */
+ GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
+ gst_bin_add_many(GST_BIN(audio), conv, flt, sink, NULL);
+ gst_element_link_many(conv, flt, sink, NULL);
+ gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
+ gst_object_unref(audiopad);
+ gst_bin_add (GST_BIN(m_gst_pipeline), audio);
+ }
+ else
+ {
+ /* Manual (non-decodebin) audio chains: mp3 (id3demux+parser)
+ or demuxed AAC/AC3 straight into the sink. */
+ gst_bin_add_many (GST_BIN (m_gst_pipeline), source, sink, NULL);
+ if ( parser && id3demux )
+ {
+ gst_bin_add_many (GST_BIN (m_gst_pipeline), parser, id3demux, NULL);
+ gst_element_link(source, id3demux);
+ g_signal_connect(id3demux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+ gst_element_link(parser, sink);
+ }
+ if ( audiodemux )
+ {
+ gst_bin_add (GST_BIN (m_gst_pipeline), audiodemux);
+ g_signal_connect(audiodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+ gst_element_link(source, audiodemux);
+ }
+ /* Register the single known audio stream (shadows the
+ GstElement* 'audio' above — intentional but confusing). */
+ audioStream audio;
+ audio.type = sourceinfo.audiotype;
+ m_audioStreams.push_back(audio);
+ }
+ }
+ } else
+ {
+ /* Failure path: notify the UI, drop every element we created.
+ Elements never added to a bin still hold their floating refs,
+ so unreffing them here is the correct cleanup. */
+ m_event((iPlayableService*)this, evUser+12);
+
+ if (m_gst_pipeline)
+ gst_object_unref(GST_OBJECT(m_gst_pipeline));
+ if (source)
+ gst_object_unref(GST_OBJECT(source));
+ if (decoder)
+ gst_object_unref(GST_OBJECT(decoder));
+ if (conv)
+ gst_object_unref(GST_OBJECT(conv));
+ if (sink)
+ gst_object_unref(GST_OBJECT(sink));
+
+ if (audio)
+ gst_object_unref(GST_OBJECT(audio));
+ if (queue_audio)
+ gst_object_unref(GST_OBJECT(queue_audio));
+ if (video)
+ gst_object_unref(GST_OBJECT(video));
+ if (queue_video)
+ gst_object_unref(GST_OBJECT(queue_video));
+ if (videodemux)
+ gst_object_unref(GST_OBJECT(videodemux));
+ if (switch_audio)
+ gst_object_unref(GST_OBJECT(switch_audio));
+
+ eDebug("sorry, can't play: %s",m_error_message.c_str());
+ m_gst_pipeline = 0;
+ }
+
+ /* NOTE(review): on the failure path above m_gst_pipeline was just set
+ to 0, yet this call is unconditional — gst_element_set_state(NULL, ...)
+ triggers a GLib critical warning. Presumably harmless in practice,
+ but this should be guarded with "if (m_gst_pipeline)". */
+ gst_element_set_state (m_gst_pipeline, GST_STATE_PLAYING);