if (sc)
{
std::list<std::string> extensions;
+ extensions.push_back("mp2");
extensions.push_back("mp3");
extensions.push_back("ogg");
extensions.push_back("mpg");
extensions.push_back("wave");
extensions.push_back("mkv");
extensions.push_back("avi");
+ extensions.push_back("divx");
extensions.push_back("dat");
extensions.push_back("flac");
+ extensions.push_back("mp4");
+ extensions.push_back("m4a");
sc->addServiceFactory(eServiceFactoryMP3::id, this, extensions);
}
eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eApp, 1)
{
+ m_seekTimeout = eTimer::create(eApp);
m_stream_tags = 0;
- m_audioStreams.clear();
- m_subtitleStreams.clear();
m_currentAudioStream = 0;
m_currentSubtitleStream = 0;
m_subtitle_widget = 0;
m_currentTrickRatio = 0;
- CONNECT(m_seekTimeout.timeout, eServiceMP3::seekTimeoutCB);
+ CONNECT(m_seekTimeout->timeout, eServiceMP3::seekTimeoutCB);
CONNECT(m_pump.recv_msg, eServiceMP3::gstPoll);
GstElement *source = 0;
-
- GstElement *decoder = 0, *conv = 0, *flt = 0, *sink = 0; /* for audio */
-
- GstElement *audio = 0, *switch_audio = 0, *queue_audio = 0, *video = 0, *queue_video = 0, *videodemux = 0;
-
+ GstElement *decoder = 0, *conv = 0, *flt = 0, *parser = 0, *sink = 0; /* for audio */
+	GstElement *audio = 0, *switch_audio = 0, *queue_audio = 0, *video = 0, *queue_video = 0, *videodemux = 0, *audiodemux = 0, *id3demux = 0; /* id3demux must be zero-initialised: the later "if ( parser && id3demux )" link step reads it for every audio type, not only atMP3 */
+ m_aspect = m_width = m_height = m_framerate = m_progressive = -1;
+
m_state = stIdle;
eDebug("SERVICEMP3 construct!");
if (!ext)
ext = filename;
- int is_mpeg_ps = !(strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat"));
- int is_mpeg_ts = !strcasecmp(ext, ".ts");
- int is_matroska = !strcasecmp(ext, ".mkv");
- int is_avi = !strcasecmp(ext, ".avi");
- int is_mp3 = !strcasecmp(ext, ".mp3"); /* force mp3 instead of decodebin */
- int is_video = is_mpeg_ps || is_mpeg_ts || is_matroska || is_avi;
- int is_streaming = !strncmp(filename, "http://", 7);
- int is_AudioCD = !(strncmp(filename, "/autofs/", 8) || strncmp(filename+strlen(filename)-13, "/track-", 7) || strcasecmp(ext, ".wav"));
- int is_VCD = !strcasecmp(ext, ".dat");
-
- eDebug("filename: %s, is_mpeg_ps: %d, is_mpeg_ts: %d, is_video: %d, is_streaming: %d, is_mp3: %d, is_matroska: %d, is_avi: %d, is_AudioCD: %d, is_VCD: %d", filename, is_mpeg_ps, is_mpeg_ts, is_video, is_streaming, is_mp3, is_matroska, is_avi, is_AudioCD, is_VCD);
-
- int is_audio = !is_video;
+	sourceStream sourceinfo;
+	sourceinfo.is_video = FALSE;
+	sourceinfo.audiotype = atUnknown;
+	/* is_streaming and containertype are read below (eDebug, branch selection)
+	   but are otherwise only conditionally assigned — initialise explicitly
+	   unless sourceStream's constructor already does so (verify in header) */
+	sourceinfo.is_streaming = FALSE;
+ if ( (strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat") ) == 0 )
+ {
+ sourceinfo.containertype = ctMPEGPS;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".ts") == 0 )
+ {
+ sourceinfo.containertype = ctMPEGTS;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".mkv") == 0 )
+ {
+ sourceinfo.containertype = ctMKV;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".avi") == 0 || strcasecmp(ext, ".divx") == 0)
+ {
+ sourceinfo.containertype = ctAVI;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".mp4") == 0 )
+ {
+ sourceinfo.containertype = ctMP4;
+ sourceinfo.is_video = TRUE;
+ }
+ else if ( strcasecmp(ext, ".m4a") == 0 )
+ {
+ sourceinfo.containertype = ctMP4;
+ sourceinfo.audiotype = atAAC;
+ }
+ else if ( strcasecmp(ext, ".mp3") == 0 )
+ sourceinfo.audiotype = atMP3;
+ else if ( (strncmp(filename, "/autofs/", 8) || strncmp(filename+strlen(filename)-13, "/track-", 7) || strcasecmp(ext, ".wav")) == 0 )
+ sourceinfo.containertype = ctCDA;
+ if ( strcasecmp(ext, ".dat") == 0 )
+ {
+ sourceinfo.containertype = ctVCD;
+ sourceinfo.is_video = TRUE;
+ }
+ if ( (strncmp(filename, "http://", 7)) == 0 )
+ sourceinfo.is_streaming = TRUE;
+
+ eDebug("filename=%s, containertype=%d, is_video=%d, is_streaming=%d", filename, sourceinfo.containertype, sourceinfo.is_video, sourceinfo.is_streaming);
int all_ok = 0;
m_gst_pipeline = gst_pipeline_new ("mediaplayer");
if (!m_gst_pipeline)
- eWarning("failed to create pipeline");
+ m_error_message = "failed to create GStreamer pipeline!\n";
- if (is_AudioCD)
+ if ( sourceinfo.is_streaming )
{
- source = gst_element_factory_make ("cdiocddasrc", "cda-source");
+ eDebug("play webradio!");
+ source = gst_element_factory_make ("neonhttpsrc", "http-source");
if (source)
- g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
+ {
+ g_object_set (G_OBJECT (source), "location", filename, NULL);
+ g_object_set (G_OBJECT (source), "automatic-redirect", TRUE, NULL);
+ }
else
- is_AudioCD = 0;
+ m_error_message = "GStreamer plugin neonhttpsrc not available!\n";
}
- if ( !is_streaming && !is_AudioCD )
- source = gst_element_factory_make ("filesrc", "file-source");
- else if ( is_streaming )
+ else if ( sourceinfo.containertype == ctCDA )
{
- source = gst_element_factory_make ("neonhttpsrc", "http-source");
+ source = gst_element_factory_make ("cdiocddasrc", "cda-source");
if (source)
- g_object_set (G_OBJECT (source), "automatic-redirect", TRUE, NULL);
- }
-
- if (!source)
- eWarning("failed to create %s", is_streaming ? "neonhttpsrc" : "filesrc");
- /* configure source */
- else if (!is_AudioCD)
- g_object_set (G_OBJECT (source), "location", filename, NULL);
- else
- {
- int track = atoi(filename+18);
- eDebug("play audio CD track #%i",track);
- if (track > 0)
- g_object_set (G_OBJECT (source), "track", track, NULL);
+ {
+ g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
+ int track = atoi(filename+18);
+ eDebug("play audio CD track #%i",track);
+ if (track > 0)
+ g_object_set (G_OBJECT (source), "track", track, NULL);
+ }
}
-
- if (is_audio)
+ else if ( sourceinfo.containertype == ctVCD )
{
- /* filesrc -> decodebin -> audioconvert -> capsfilter -> alsasink */
- const char *decodertype = "decodebin";
-
- decoder = gst_element_factory_make (decodertype, "decoder");
- if (!decoder)
- eWarning("failed to create %s decoder", decodertype);
-
- conv = gst_element_factory_make ("audioconvert", "converter");
- if (!conv)
- eWarning("failed to create audioconvert");
-
- flt = gst_element_factory_make ("capsfilter", "flt");
- if (!flt)
- eWarning("failed to create capsfilter");
-
- /* for some reasons, we need to set the sample format to depth/width=16, because auto negotiation doesn't work. */
- /* endianness, however, is not required to be set anymore. */
- if (flt)
+ int fd = open(filename,O_RDONLY);
+ char tmp[128*1024];
+ int ret = read(fd, tmp, 128*1024);
+ close(fd);
+ if ( ret == -1 ) // this is a "REAL" VCD
{
- GstCaps *caps = gst_caps_new_simple("audio/x-raw-int", /* "endianness", G_TYPE_INT, 4321, */ "depth", G_TYPE_INT, 16, "width", G_TYPE_INT, 16, /*"channels", G_TYPE_INT, 2, */(char*)0);
- g_object_set (G_OBJECT (flt), "caps", caps, (char*)0);
- gst_caps_unref(caps);
+ source = gst_element_factory_make ("vcdsrc", "vcd-source");
+ if (source)
+ {
+ g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
+ eDebug("servicemp3: this is a 'REAL' video cd... we use vcdsrc !");
+ }
}
-
- sink = gst_element_factory_make ("alsasink", "alsa-output");
- if (!sink)
- eWarning("failed to create osssink");
-
- if (source && decoder && conv && sink)
- all_ok = 1;
- } else /* is_video */
+ }
+ if ( !source && !sourceinfo.is_streaming )
+ {
+ source = gst_element_factory_make ("filesrc", "file-source");
+ if (source)
+ g_object_set (G_OBJECT (source), "location", filename, NULL);
+ else
+ m_error_message = "GStreamer can't open filesrc " + (std::string)filename + "!\n";
+ }
+ if ( sourceinfo.is_video )
{
/* filesrc -> mpegdemux -> | queue_audio -> dvbaudiosink
| queue_video -> dvbvideosink */
audio = gst_element_factory_make("dvbaudiosink", "audiosink");
- queue_audio = gst_element_factory_make("queue", "queue_audio");
-
+ if (!audio)
+ m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+
video = gst_element_factory_make("dvbvideosink", "videosink");
+ if (!video)
+ m_error_message += "failed to create Gstreamer element dvbvideosink\n";
+
+ queue_audio = gst_element_factory_make("queue", "queue_audio");
queue_video = gst_element_factory_make("queue", "queue_video");
-
- if (is_mpeg_ps)
- videodemux = gst_element_factory_make("flupsdemux", "videodemux");
- else if (is_mpeg_ts)
- videodemux = gst_element_factory_make("flutsdemux", "videodemux");
- else if (is_matroska)
- videodemux = gst_element_factory_make("matroskademux", "videodemux");
- else if (is_avi)
- videodemux = gst_element_factory_make("avidemux", "videodemux");
- if (!videodemux)
+ std::string demux_type;
+ switch (sourceinfo.containertype)
{
- eDebug("fluendo mpegdemux not available, falling back to mpegdemux\n");
- videodemux = gst_element_factory_make("mpegdemux", "videodemux");
+ case ctMPEGTS:
+ demux_type = "flutsdemux";
+ break;
+ case ctMPEGPS:
+ case ctVCD:
+ demux_type = "flupsdemux";
+ break;
+ case ctMKV:
+ demux_type = "matroskademux";
+ break;
+ case ctAVI:
+ demux_type = "avidemux";
+ break;
+ case ctMP4:
+ demux_type = "qtdemux";
+ break;
+ default:
+ break;
}
+ videodemux = gst_element_factory_make(demux_type.c_str(), "videodemux");
+ if (!videodemux)
+ m_error_message = "GStreamer plugin " + demux_type + " not available!\n";
+
+ switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
+ if (!switch_audio)
+ m_error_message = "GStreamer plugin input-selector not available!\n";
- eDebug("audio: %p, queue_audio %p, video %p, queue_video %p, videodemux %p", audio, queue_audio, video, queue_video, videodemux);
- if (audio && queue_audio && video && queue_video && videodemux)
+ if (audio && queue_audio && video && queue_video && videodemux && switch_audio)
{
g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 256*1024, NULL);
g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 0, NULL);
g_object_set (G_OBJECT (queue_video), "max-size-buffers", 0, NULL);
g_object_set (G_OBJECT (queue_video), "max-size-bytes", 2*1024*1024, NULL);
g_object_set (G_OBJECT (queue_video), "max-size-time", (guint64)0, NULL);
+ g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
all_ok = 1;
}
+ } else /* is audio */
+ {
+ std::string demux_type;
+ switch ( sourceinfo.containertype )
+ {
+ case ctMP4:
+ demux_type = "qtdemux";
+ break;
+ default:
+ break;
+ }
+ if ( demux_type.length() )
+ {
+ audiodemux = gst_element_factory_make(demux_type.c_str(), "audiodemux");
+ if (!audiodemux)
+ m_error_message = "GStreamer plugin " + demux_type + " not available!\n";
+ }
+ switch ( sourceinfo.audiotype )
+ {
+ case atMP3:
+ {
+ id3demux = gst_element_factory_make("id3demux", "id3demux");
+ if ( !id3demux )
+ {
+ m_error_message += "failed to create Gstreamer element id3demux\n";
+ break;
+ }
+ parser = gst_element_factory_make("mp3parse", "audiosink");
+ if ( !parser )
+ {
+ m_error_message += "failed to create Gstreamer element mp3parse\n";
+ break;
+ }
+ sink = gst_element_factory_make("dvbaudiosink", "audiosink2");
+ if ( !sink )
+ m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+ else
+ all_ok = 1;
+ break;
+ }
+ case atAAC:
+ {
+ if ( !audiodemux )
+ {
+ m_error_message += "cannot parse raw AAC audio\n";
+ break;
+ }
+ sink = gst_element_factory_make("dvbaudiosink", "audiosink");
+ if (!sink)
+ m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+ else
+ all_ok = 1;
+ break;
+ }
+ case atAC3:
+ {
+ if ( !audiodemux )
+ {
+ m_error_message += "cannot parse raw AC3 audio\n";
+ break;
+ }
+ sink = gst_element_factory_make("dvbaudiosink", "audiosink");
+ if ( !sink )
+ m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+ else
+ all_ok = 1;
+ break;
+ }
+ default:
+ { /* filesrc -> decodebin -> audioconvert -> capsfilter -> alsasink */
+ decoder = gst_element_factory_make ("decodebin", "decoder");
+ if (!decoder)
+ m_error_message += "failed to create Gstreamer element decodebin\n";
+
+ conv = gst_element_factory_make ("audioconvert", "converter");
+ if (!conv)
+ m_error_message += "failed to create Gstreamer element audioconvert\n";
+
+ flt = gst_element_factory_make ("capsfilter", "flt");
+ if (!flt)
+ m_error_message += "failed to create Gstreamer element capsfilter\n";
+
+ /* for some reasons, we need to set the sample format to depth/width=16, because auto negotiation doesn't work. */
+ /* endianness, however, is not required to be set anymore. */
+ if (flt)
+ {
+ GstCaps *caps = gst_caps_new_simple("audio/x-raw-int", /* "endianness", G_TYPE_INT, 4321, */ "depth", G_TYPE_INT, 16, "width", G_TYPE_INT, 16, /*"channels", G_TYPE_INT, 2, */NULL);
+ g_object_set (G_OBJECT (flt), "caps", caps, NULL);
+ gst_caps_unref(caps);
+ }
+
+ sink = gst_element_factory_make ("alsasink", "alsa-output");
+ if (!sink)
+ m_error_message += "failed to create Gstreamer element alsasink\n";
+
+ if (source && decoder && conv && sink)
+ all_ok = 1;
+ break;
+ }
+ }
+
}
-
if (m_gst_pipeline && all_ok)
{
gst_bus_set_sync_handler(gst_pipeline_get_bus (GST_PIPELINE (m_gst_pipeline)), gstBusSyncHandler, this);
- if (is_AudioCD)
+ if ( sourceinfo.containertype == ctCDA )
{
queue_audio = gst_element_factory_make("queue", "queue_audio");
g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
gst_bin_add_many (GST_BIN (m_gst_pipeline), source, queue_audio, conv, sink, NULL);
gst_element_link_many(source, queue_audio, conv, sink, NULL);
}
- else if (is_audio)
- {
- queue_audio = gst_element_factory_make("queue", "queue_audio");
-
- g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
- g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
-
- if (!is_mp3)
- g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
-
- /* gst_bin will take the 'floating references' */
- gst_bin_add_many (GST_BIN (m_gst_pipeline),
- source, queue_audio, decoder, NULL);
-
- /* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
- gst_element_link_many(source, queue_audio, decoder, NULL);
-
- /* create audio bin with the audioconverter, the capsfilter and the audiosink */
- audio = gst_bin_new ("audiobin");
-
- GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
- gst_bin_add_many(GST_BIN(audio), conv, flt, sink, (char*)0);
- gst_element_link_many(conv, flt, sink, (char*)0);
- gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
- gst_object_unref(audiopad);
- gst_bin_add (GST_BIN(m_gst_pipeline), audio);
- /* in mad's case, we can directly connect the decoder to the audiobin. otherwise, we do this in gstCBnewPad */
- if (is_mp3)
- gst_element_link(decoder, audio);
-
- } else /* is_video */
+ else if ( sourceinfo.is_video )
{
char srt_filename[strlen(filename)+1];
strncpy(srt_filename,filename,strlen(filename)-3);
eDebug("subtitle file found: %s",srt_filename);
GstElement *subsource = gst_element_factory_make ("filesrc", "srt_source");
g_object_set (G_OBJECT (subsource), "location", srt_filename, NULL);
- GstElement *parser = gst_element_factory_make("subparse", "parse_subtitles");
- GstElement *switch_subtitles = gst_element_factory_make ("input-selector", "switch_subtitles");
- GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
- gst_bin_add_many(GST_BIN (m_gst_pipeline), subsource, switch_subtitles, parser, sink, NULL);
- gst_element_link(subsource, switch_subtitles);
- gst_element_link(switch_subtitles, parser);
- gst_element_link(parser, sink);
- g_object_set (G_OBJECT(switch_subtitles), "select-all", TRUE, NULL);
- g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
- g_object_set (G_OBJECT(sink), "sync", TRUE, NULL);
- g_object_set (G_OBJECT(parser), "subtitle-encoding", "ISO-8859-15", NULL);
- g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), this);
+ gst_bin_add(GST_BIN (m_gst_pipeline), subsource);
+ GstPad *switchpad = gstCreateSubtitleSink(this, stSRT);
+ gst_pad_link(gst_element_get_pad (subsource, "src"), switchpad);
subtitleStream subs;
- subs.language_code = std::string(".srt file");
+ subs.pad = switchpad;
+ subs.type = stSRT;
+ subs.language_code = std::string("und");
m_subtitleStreams.push_back(subs);
}
- gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, NULL);
- switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
- if (switch_audio)
- {
- g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
- gst_bin_add(GST_BIN(m_gst_pipeline), switch_audio);
- gst_element_link(switch_audio, queue_audio);
- }
+ gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, switch_audio, NULL);
- if (is_VCD)
+ if ( sourceinfo.containertype == ctVCD && gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"file-source") )
{
+ eDebug("servicemp3: this is a fake video cd... we use filesrc ! cdxaparse !");
GstElement *cdxaparse = gst_element_factory_make("cdxaparse", "cdxaparse");
gst_bin_add(GST_BIN(m_gst_pipeline), cdxaparse);
gst_element_link(source, cdxaparse);
}
else
gst_element_link(source, videodemux);
+
+ gst_element_link(switch_audio, queue_audio);
gst_element_link(queue_audio, audio);
gst_element_link(queue_video, video);
g_signal_connect(videodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+
+ } else /* is audio*/
+ {
+ if ( decoder )
+ {
+ queue_audio = gst_element_factory_make("queue", "queue_audio");
+
+ g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
+ g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
+
+ g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
+
+ /* gst_bin will take the 'floating references' */
+ gst_bin_add_many (GST_BIN (m_gst_pipeline),
+ source, queue_audio, decoder, NULL);
+
+ /* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
+ gst_element_link_many(source, queue_audio, decoder, NULL);
+
+ /* create audio bin with the audioconverter, the capsfilter and the audiosink */
+ audio = gst_bin_new ("audiobin");
+
+ GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
+ gst_bin_add_many(GST_BIN(audio), conv, flt, sink, NULL);
+ gst_element_link_many(conv, flt, sink, NULL);
+ gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
+ gst_object_unref(audiopad);
+ gst_bin_add (GST_BIN(m_gst_pipeline), audio);
+ }
+ else
+ {
+ gst_bin_add_many (GST_BIN (m_gst_pipeline), source, sink, NULL);
+ if ( parser && id3demux )
+ {
+ gst_bin_add_many (GST_BIN (m_gst_pipeline), parser, id3demux, NULL);
+ gst_element_link(source, id3demux);
+ g_signal_connect(id3demux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+ gst_element_link(parser, sink);
+ }
+ if ( audiodemux )
+ {
+ gst_bin_add (GST_BIN (m_gst_pipeline), audiodemux);
+ g_signal_connect(audiodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+ gst_element_link(source, audiodemux);
+ }
+ audioStream audio;
+ audio.type = sourceinfo.audiotype;
+ m_audioStreams.push_back(audio);
+ }
}
} else
{
+ m_event((iPlayableService*)this, evUser+12);
+
if (m_gst_pipeline)
gst_object_unref(GST_OBJECT(m_gst_pipeline));
if (source)
if (switch_audio)
gst_object_unref(GST_OBJECT(switch_audio));
- eDebug("sorry, can't play.");
+ eDebug("sorry, can't play: %s",m_error_message.c_str());
m_gst_pipeline = 0;
}
-
+
gst_element_set_state (m_gst_pipeline, GST_STATE_PLAYING);
}
{
m_currentTrickRatio = ratio;
if (ratio)
- m_seekTimeout.start(1000, 0);
+ m_seekTimeout->start(1000, 0);
else
- m_seekTimeout.stop();
+ m_seekTimeout->stop();
return 0;
}
if (ppos < 0)
{
ppos = 0;
- m_seekTimeout.stop();
+ m_seekTimeout->stop();
}
if (ppos > len)
{
ppos = 0;
stop();
- m_seekTimeout.stop();
+ m_seekTimeout->stop();
return;
}
seekTo(ppos);
return -1;
if (m_state != stRunning)
return -1;
-
+
GstFormat fmt = GST_FORMAT_TIME;
gint64 len;
if (!gst_element_query_position(m_gst_pipeline, &fmt, &len))
return -1;
-
+
/* len is in nanoseconds. we have 90 000 pts per second. */
pts = len / 11111;
return 0;
switch (w)
{
+ case sVideoHeight: return m_height;
+ case sVideoWidth: return m_width;
+ case sFrameRate: return m_framerate;
+ case sProgressive: return m_progressive;
+ case sAspect: return m_aspect;
case sTitle:
case sArtist:
case sAlbum:
case sTracknumber:
case sGenre:
case sVideoType:
+ case sTimeCreate:
case sUser+12:
return resIsString;
case sCurrentTitle:
+/* Return the string value for info field w, or "" when unavailable.
+   Values come from the merged GStreamer tag list collected from bus TAG messages. */
std::string eServiceMP3::getInfoString(int w)
{
+	/* no tags collected yet -> nothing to report for any field */
+	if ( !m_stream_tags )
+		return "";
	gchar *tag = 0;
	switch (w)
	{
		case sVideoType:
			tag = GST_TAG_VIDEO_CODEC;
			break;
+		case sTimeCreate:
+			/* GST_TAG_DATE is a GDate, not a string tag: format just the year */
+			GDate *date;
+			if (gst_tag_list_get_date(m_stream_tags, GST_TAG_DATE, &date))
+			{
+				gchar res[5]; /* "YYYY" + NUL */
+				g_date_strftime (res, sizeof(res), "%Y", date);
+				return (std::string)res;
+			}
+			break;
		case sUser+12:
+			/* last setup/pipeline error message recorded for the evUser+12 event */
			return m_error_message;
		default:
			return "";
	}
-
-	if (!m_stream_tags || !tag)
+	/* m_stream_tags was already checked at function entry */
+	if ( !tag )
		return "";
-
	gchar *value;
-
	if (gst_tag_list_get_string(m_stream_tags, tag, &value))
	{
		std::string res = value;
		g_free(value);
		return res;
	}
-
	return "";
}
#endif
switch (GST_MESSAGE_TYPE (msg))
{
- case GST_MESSAGE_EOS:
- m_event((iPlayableService*)this, evEOF);
- break;
- case GST_MESSAGE_ERROR:
- {
- gchar *debug;
- GError *err;
-
- gst_message_parse_error (msg, &err, &debug);
- g_free (debug);
- eWarning("Gstreamer error: %s (%i)", err->message, err->code );
- if ( err->domain == GST_STREAM_ERROR && err->code == GST_STREAM_ERROR_DECODE )
+ case GST_MESSAGE_EOS:
+ m_event((iPlayableService*)this, evEOF);
+ break;
+ case GST_MESSAGE_ERROR:
{
- if ( g_strrstr(sourceName, "videosink") )
- m_event((iPlayableService*)this, evUser+11);
+ gchar *debug;
+ GError *err;
+
+ gst_message_parse_error (msg, &err, &debug);
+ g_free (debug);
+ eWarning("Gstreamer error: %s (%i) from %s", err->message, err->code, sourceName );
+ if ( err->domain == GST_STREAM_ERROR )
+ {
+ if ( err->code == GST_STREAM_ERROR_CODEC_NOT_FOUND && g_strrstr(sourceName, "videosink") )
+ m_event((iPlayableService*)this, evUser+11);
+ else if ( err->code == GST_STREAM_ERROR_FAILED && g_strrstr(sourceName, "file-source") )
+ {
+ eWarning("error in tag parsing, linking mp3parse directly to file-sink, bypassing id3demux...");
+ GstElement *source = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"file-source");
+ GstElement *parser = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"audiosink");
+ gst_element_set_state(m_gst_pipeline, GST_STATE_NULL);
+ gst_element_unlink(source, gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"id3demux"));
+ gst_element_link(source, parser);
+ gst_element_set_state (m_gst_pipeline, GST_STATE_PLAYING);
+ }
+ }
+ g_error_free(err);
+ break;
}
- g_error_free(err);
- /* TODO: signal error condition to user */
- break;
- }
- case GST_MESSAGE_TAG:
- {
- GstTagList *tags, *result;
- gst_message_parse_tag(msg, &tags);
-
- result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_PREPEND);
- if (result)
+ case GST_MESSAGE_INFO:
{
- if (m_stream_tags)
- gst_tag_list_free(m_stream_tags);
- m_stream_tags = result;
+ gchar *debug;
+ GError *inf;
+
+ gst_message_parse_info (msg, &inf, &debug);
+ g_free (debug);
+ if ( inf->domain == GST_STREAM_ERROR && inf->code == GST_STREAM_ERROR_DECODE )
+ {
+ if ( g_strrstr(sourceName, "videosink") )
+ m_event((iPlayableService*)this, evUser+14);
+ }
+ g_error_free(inf);
+ break;
}
-
- gchar *g_audiocodec;
- if ( gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_audiocodec) && m_audioStreams.size() == 0 )
+ case GST_MESSAGE_TAG:
{
- GstPad* pad = gst_element_get_pad (GST_ELEMENT(source), "src");
- GstCaps* caps = gst_pad_get_caps(pad);
- GstStructure* str = gst_caps_get_structure(caps, 0);
- if ( !str )
- break;
- audioStream audio;
- audio.type = gstCheckAudioPad(str);
- m_audioStreams.push_back(audio);
+ GstTagList *tags, *result;
+ gst_message_parse_tag(msg, &tags);
+
+ result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_PREPEND);
+ if (result)
+ {
+ if (m_stream_tags)
+ gst_tag_list_free(m_stream_tags);
+ m_stream_tags = result;
+ }
+
+ gchar *g_audiocodec;
+ if ( gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_audiocodec) && m_audioStreams.size() == 0 )
+ {
+ GstPad* pad = gst_element_get_pad (GST_ELEMENT(source), "src");
+ GstCaps* caps = gst_pad_get_caps(pad);
+ GstStructure* str = gst_caps_get_structure(caps, 0);
+ if ( !str )
+ break;
+ audioStream audio;
+ audio.type = gstCheckAudioPad(str);
+ m_audioStreams.push_back(audio);
+ }
+
+ const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
+ if ( gv_image )
+ {
+ GstBuffer *buf_image;
+ buf_image = gst_value_get_buffer (gv_image);
+ int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
+ write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
+ close(fd);
+ m_event((iPlayableService*)this, evUser+13);
+ }
+
+ gst_tag_list_free(tags);
+ m_event((iPlayableService*)this, evUpdatedInfo);
+ break;
}
- break;
- }
- case GST_MESSAGE_ASYNC_DONE:
- {
- GstTagList *tags;
- for (std::vector<audioStream>::iterator IterAudioStream(m_audioStreams.begin()); IterAudioStream != m_audioStreams.end(); ++IterAudioStream)
+ case GST_MESSAGE_ASYNC_DONE:
{
- if ( IterAudioStream->pad )
+ GstTagList *tags;
+ for (std::vector<audioStream>::iterator IterAudioStream(m_audioStreams.begin()); IterAudioStream != m_audioStreams.end(); ++IterAudioStream)
{
- g_object_get(IterAudioStream->pad, "tags", &tags, NULL);
- gchar *g_language;
- if ( gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+ if ( IterAudioStream->pad )
{
- eDebug("found audio language %s",g_language);
- IterAudioStream->language_code = std::string(g_language);
- g_free (g_language);
+ g_object_get(IterAudioStream->pad, "tags", &tags, NULL);
+ gchar *g_language;
+ if ( tags && gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+ {
+ eDebug("found audio language %s",g_language);
+ IterAudioStream->language_code = std::string(g_language);
+ g_free (g_language);
+ }
}
}
- }
- for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
- {
- if ( IterSubtitleStream->pad )
+ for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
{
- g_object_get(IterSubtitleStream->pad, "tags", &tags, NULL);
- gchar *g_language;
- if ( gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+ if ( IterSubtitleStream->pad )
{
- eDebug("found subtitle language %s",g_language);
- IterSubtitleStream->language_code = std::string(g_language);
- g_free (g_language);
+ g_object_get(IterSubtitleStream->pad, "tags", &tags, NULL);
+ gchar *g_language;
+ if ( tags && gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+ {
+ eDebug("found subtitle language %s",g_language);
+ IterSubtitleStream->language_code = std::string(g_language);
+ g_free (g_language);
+ }
}
}
}
- }
- case GST_MESSAGE_ELEMENT:
- {
- if ( gst_is_missing_plugin_message(msg) )
+ case GST_MESSAGE_ELEMENT:
{
- gchar *description = gst_missing_plugin_message_get_description(msg);
- if ( description )
+ if ( gst_is_missing_plugin_message(msg) )
{
- m_error_message = description;
- g_free(description);
- m_event((iPlayableService*)this, evUser+12);
+ gchar *description = gst_missing_plugin_message_get_description(msg);
+ if ( description )
+ {
+ m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
+ g_free(description);
+ m_event((iPlayableService*)this, evUser+12);
+ }
+ }
+ else if (const GstStructure *msgstruct = gst_message_get_structure(msg))
+ {
+ const gchar *eventname = gst_structure_get_name(msgstruct);
+ if ( eventname )
+ {
+ if (!strcmp(eventname, "eventSizeChanged") || !strcmp(eventname, "eventSizeAvail"))
+ {
+ gst_structure_get_int (msgstruct, "aspect_ratio", &m_aspect);
+ gst_structure_get_int (msgstruct, "width", &m_width);
+ gst_structure_get_int (msgstruct, "height", &m_height);
+ if (strstr(eventname, "Changed"))
+ m_event((iPlayableService*)this, evVideoSizeChanged);
+ }
+ else if (!strcmp(eventname, "eventFrameRateChanged") || !strcmp(eventname, "eventFrameRateAvail"))
+ {
+ gst_structure_get_int (msgstruct, "frame_rate", &m_framerate);
+ if (strstr(eventname, "Changed"))
+ m_event((iPlayableService*)this, evVideoFramerateChanged);
+ }
+ else if (!strcmp(eventname, "eventProgressiveChanged") || !strcmp(eventname, "eventProgressiveAvail"))
+ {
+ gst_structure_get_int (msgstruct, "progressive", &m_progressive);
+ if (strstr(eventname, "Changed"))
+ m_event((iPlayableService*)this, evVideoProgressiveChanged);
+ }
+ }
}
}
- }
- default:
- break;
+ default:
+ break;
}
g_free (sourceName);
}
}
else
{
- gst_pad_link(pad, gst_element_get_static_pad(gst_bin_get_by_name(pipeline,"queue_audio"), "sink"));
- _this->m_audioStreams.push_back(audio);
+ GstElement *queue_audio = gst_bin_get_by_name(pipeline , "queue_audio");
+ if ( queue_audio )
+ {
+ gst_pad_link(pad, gst_element_get_static_pad(queue_audio, "sink"));
+ _this->m_audioStreams.push_back(audio);
+ }
+ else
+ gst_pad_link(pad, gst_element_get_static_pad(gst_bin_get_by_name(pipeline , "audiosink"), "sink"));
}
}
if (g_strrstr(type,"video"))
}
if (g_strrstr(type,"application/x-ssa") || g_strrstr(type,"application/x-ass"))
{
- GstElement *switch_subtitles = gst_bin_get_by_name(pipeline,"switch_subtitles");
- if ( !switch_subtitles )
- {
- switch_subtitles = gst_element_factory_make ("input-selector", "switch_subtitles");
- if ( !switch_subtitles )
- return;
- GstElement *parser = gst_element_factory_make("ssaparse", "parse_subtitles");
- GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
- gst_bin_add_many(pipeline, switch_subtitles, parser, sink, NULL);
- gst_element_link(switch_subtitles, parser);
- gst_element_link(parser, sink);
- g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
- g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), _this);
- }
- GstPad *sinkpad = gst_element_get_request_pad (switch_subtitles, "sink%d");
- gst_pad_link(pad, sinkpad);
+ GstPad *switchpad = _this->gstCreateSubtitleSink(_this, stSSA);
+ gst_pad_link(pad, switchpad);
subtitleStream subs;
- subs.pad = sinkpad;
+ subs.pad = switchpad;
+ subs.type = stSSA;
_this->m_subtitleStreams.push_back(subs);
}
+ if (g_strrstr(type,"text/plain"))
+ {
+ GstPad *switchpad = _this->gstCreateSubtitleSink(_this, stPlainText);
+ gst_pad_link(pad, switchpad);
+ subtitleStream subs;
+ subs.pad = switchpad;
+ subs.type = stPlainText;
+ _this->m_subtitleStreams.push_back(subs);
+ }
+}
+
+/*
+ * Build (once per pipeline) the shared subtitle sink chain and return a new
+ * request pad on the selector matching 'type'.
+ *
+ * Chain created on first call:
+ *   switch_substream_plain ----------------\
+ *   switch_substream_ssa -> ssaparse -------> switch_subparse -> fakesink
+ *   switch_substream_srt -> subparse ------/
+ * The fakesink fires "handoff" into gstCBsubtitleAvail for every cue.
+ *
+ * type: stSSA / stSRT select their parser branch; stPlainText and any
+ * unknown value fall back to the plain branch.
+ * Returns a request pad (caller keeps that reference) or NULL when the
+ * required GStreamer elements are unavailable.
+ */
+GstPad* eServiceMP3::gstCreateSubtitleSink(eServiceMP3* _this, subtype_t type)
+{
+	GstBin *pipeline = GST_BIN(_this->m_gst_pipeline);
+	GstElement *switch_subparse = gst_bin_get_by_name(pipeline, "switch_subparse");
+	if ( switch_subparse )
+		gst_object_unref(switch_subparse); /* gst_bin_get_by_name returns a new reference; we only probed for existence */
+	else
+	{
+		switch_subparse = gst_element_factory_make ("input-selector", "switch_subparse");
+		GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
+		if ( !switch_subparse || !sink )
+			return NULL; /* input-selector / fakesink plugin missing */
+		gst_bin_add_many(pipeline, switch_subparse, sink, NULL);
+		gst_element_link(switch_subparse, sink);
+		g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
+		g_object_set (G_OBJECT(sink), "sync", TRUE, NULL);
+		g_object_set (G_OBJECT(sink), "async", FALSE, NULL);
+		g_signal_connect(sink, "handoff", G_CALLBACK(_this->gstCBsubtitleAvail), _this);
+
+		// order is essential since requested sink pad names can't be explicitely chosen
+		GstElement *switch_substream_plain = gst_element_factory_make ("input-selector", "switch_substream_plain");
+		gst_bin_add(pipeline, switch_substream_plain);
+		GstPad *sinkpad_plain = gst_element_get_request_pad (switch_subparse, "sink%d");
+		GstPad *srcpad_plain = gst_element_get_static_pad (switch_substream_plain, "src");
+		gst_pad_link(srcpad_plain, sinkpad_plain);
+		gst_object_unref(srcpad_plain); /* drop pad ref; the element keeps its pad alive */
+
+		GstElement *switch_substream_ssa = gst_element_factory_make ("input-selector", "switch_substream_ssa");
+		GstElement *ssaparse = gst_element_factory_make("ssaparse", "ssaparse");
+		gst_bin_add_many(pipeline, switch_substream_ssa, ssaparse, NULL);
+		GstPad *sinkpad_ssa = gst_element_get_request_pad (switch_subparse, "sink%d");
+		gst_element_link(switch_substream_ssa, ssaparse);
+		GstPad *srcpad_ssa = gst_element_get_static_pad (ssaparse, "src");
+		gst_pad_link(srcpad_ssa, sinkpad_ssa);
+		gst_object_unref(srcpad_ssa);
+
+		GstElement *switch_substream_srt = gst_element_factory_make ("input-selector", "switch_substream_srt");
+		GstElement *srtparse = gst_element_factory_make("subparse", "srtparse");
+		gst_bin_add_many(pipeline, switch_substream_srt, srtparse, NULL);
+		GstPad *sinkpad_srt = gst_element_get_request_pad (switch_subparse, "sink%d");
+		gst_element_link(switch_substream_srt, srtparse);
+		GstPad *srcpad_srt = gst_element_get_static_pad (srtparse, "src");
+		gst_pad_link(srcpad_srt, sinkpad_srt);
+		gst_object_unref(srcpad_srt);
+		g_object_set (G_OBJECT(srtparse), "subtitle-encoding", "ISO-8859-15", NULL);
+	}
+
+	/* pick the substream selector matching the requested subtitle type */
+	const char *selector_name;
+	switch (type)
+	{
+		case stSSA:
+			selector_name = "switch_substream_ssa";
+			break;
+		case stSRT:
+			selector_name = "switch_substream_srt";
+			break;
+		case stPlainText:
+		default:
+			selector_name = "switch_substream_plain";
+			break;
+	}
+	GstElement *switch_substream = gst_bin_get_by_name(pipeline, selector_name);
+	if ( !switch_substream )
+		return NULL;
+	GstPad *sinkpad = gst_element_get_request_pad (switch_substream, "sink%d");
+	gst_object_unref(switch_substream); /* drop the lookup reference; the bin still owns the element */
+	return sinkpad;
}
void eServiceMP3::gstCBfilterPadAdded(GstElement *filter, GstPad *pad, gpointer user_data)
void eServiceMP3::gstCBsubtitleAvail(GstElement *element, GstBuffer *buffer, GstPad *pad, gpointer user_data)
{
- const unsigned char *text = (unsigned char *)GST_BUFFER_DATA(buffer);
- eDebug("gstCBsubtitleAvail: %s",text);
+ gint64 duration_ns = GST_BUFFER_DURATION(buffer); // buffer duration in nanoseconds
+ size_t len = GST_BUFFER_SIZE(buffer);
+ unsigned char tmp[len+1]; // NOTE(review): VLA is a GNU extension; copy needed because GST_BUFFER_DATA is not NUL-terminated
+ memcpy(tmp, GST_BUFFER_DATA(buffer), len);
+ tmp[len] = 0; // NUL-terminate so the text can be used as a C string
+ eDebug("gstCBsubtitleAvail: %s", tmp);
eServiceMP3 *_this = (eServiceMP3*)user_data;
if ( _this->m_subtitle_widget )
{
- eDVBTeletextSubtitlePage page;
+ ePangoSubtitlePage page; // pango page type replaces the teletext page type for gst text subtitles
gRGB rgbcol(0xD0,0xD0,0xD0);
- page.m_elements.push_back(eDVBTeletextSubtitlePageElement(rgbcol, (const char*)text));
+ page.m_elements.push_back(ePangoSubtitlePageElement(rgbcol, (const char*)tmp));
+ page.m_timeout = duration_ns / 1000000; // convert ns -> ms for the page timeout
(_this->m_subtitle_widget)->setPage(page);
}
}
RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
{
- eDebug("eServiceMP3::enableSubtitles");
-
ePyObject entry;
int tuplesize = PyTuple_Size(tuple);
int pid;
+ int type;
gint nb_sources;
GstPad *active_pad;
- GstElement *switch_subtitles = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_subtitles");
+ GstElement *switch_substream = NULL;
+ GstElement *switch_subparse = gst_bin_get_by_name (GST_BIN(m_gst_pipeline), "switch_subparse");
if (!PyTuple_Check(tuple))
goto error_out;
if (!PyInt_Check(entry))
goto error_out;
pid = PyInt_AsLong(entry);
+ entry = PyTuple_GET_ITEM(tuple, 2); // 3rd tuple element selects the subtitle subtype (see getSubtitleList)
+ if (!PyInt_Check(entry))
+ goto error_out;
+ type = PyInt_AsLong(entry);
+
+ switch ((subtype_t)type) // pick the per-type input-selector created during pipeline setup
+ {
+ case stPlainText:
+ switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_plain");
+ break;
+ case stSSA:
+ switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_ssa");
+ break;
+ case stSRT:
+ switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_srt");
+ break;
+ default:
+ goto error_out;
+ }
m_subtitle_widget = new eSubtitleWidget(parent);
m_subtitle_widget->resize(parent->size()); /* full size */
- if ( !switch_subtitles )
+ if ( !switch_substream )
{
eDebug("can't switch subtitle tracks! gst-plugin-selector needed");
return -2;
}
- g_object_get (G_OBJECT (switch_subtitles), "n-pads", &nb_sources, NULL);
+ g_object_get (G_OBJECT (switch_substream), "n-pads", &nb_sources, NULL);
if ( (unsigned int)pid >= m_subtitleStreams.size() || pid >= nb_sources || (unsigned int)m_currentSubtitleStream >= m_subtitleStreams.size() )
return -2;
- char sinkpad[8];
+ g_object_get (G_OBJECT (switch_subparse), "n-pads", &nb_sources, NULL); // re-check bounds against the parser-stage selector
+ if ( type < 0 || type >= nb_sources )
+ return -2;
+
+ char sinkpad[8]; // "sink" + index + NUL: [6] overflows for two-digit indices ("sink10" needs 7 bytes)
+ sprintf(sinkpad, "sink%d", type);
+ g_object_set (G_OBJECT (switch_subparse), "active-pad", gst_element_get_pad (switch_subparse, sinkpad), NULL); // route the parser branch matching the subtype
sprintf(sinkpad, "sink%d", pid);
- g_object_set (G_OBJECT (switch_subtitles), "active-pad", gst_element_get_pad (switch_subtitles, sinkpad), NULL);
- g_object_get (G_OBJECT (switch_subtitles), "active-pad", &active_pad, NULL);
- gchar *name;
- name = gst_pad_get_name (active_pad);
- eDebug ("switched subtitles to (%s)", name);
- g_free(name);
+ g_object_set (G_OBJECT (switch_substream), "active-pad", gst_element_get_pad (switch_substream, sinkpad), NULL); // then the stream within that branch
m_currentSubtitleStream = pid;
return 0;
error_out:
eDebug("enableSubtitles needs a tuple as 2nd argument!\n"
- "for gst subtitles (2, subtitle_stream_count)");
+ "for gst subtitles (2, subtitle_stream_count, subtitle_type)");
return -1;
}
eDebug("eServiceMP3::getSubtitleList");
ePyObject l = PyList_New(0);
- int stream_count = 0;
+ int stream_count[sizeof(subtype_t)]; // NOTE(review): sizeof(subtype_t) is the enum's size in bytes (typically 4), not its enumerator count -- confirm it bounds every subtype value
+ for ( unsigned int i = 0; i < sizeof(subtype_t); i++ )
+ stream_count[i] = 0;
for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
{
+ subtype_t type = IterSubtitleStream->type;
ePyObject tuple = PyTuple_New(5);
PyTuple_SET_ITEM(tuple, 0, PyInt_FromLong(2));
- PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(stream_count));
- PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(0));
+ PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(stream_count[type])); // per-type index, matching the per-type selector pads used by enableSubtitles
+ PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(int(type)));
PyTuple_SET_ITEM(tuple, 3, PyInt_FromLong(0));
PyTuple_SET_ITEM(tuple, 4, PyString_FromString((IterSubtitleStream->language_code).c_str()));
PyList_Append(l, tuple);
Py_DECREF(tuple);
- stream_count++;
+ stream_count[type]++;
}
-
return l;
}