#include <string>
#include <lib/service/servicemp3.h>
#include <lib/service/service.h>
+#include <lib/components/file_eraser.h>
#include <lib/base/init_num.h>
#include <lib/base/init.h>
#include <gst/gst.h>
+#include <sys/stat.h>
+/* for subtitles */
+#include <lib/gui/esubtitle.h>
// eServiceFactoryMP3
return 0;
}
-RESULT eServiceFactoryMP3::offlineOperations(const eServiceReference &, ePtr<iServiceOfflineOperations> &ptr)
+/* offline operations (deletion from disk) for MP3/media services */
+class eMP3ServiceOfflineOperations: public iServiceOfflineOperations
 {
-	ptr = 0;
-	return -1;
+	DECLARE_REF(eMP3ServiceOfflineOperations);
+	eServiceReference m_ref;
+public:
+	eMP3ServiceOfflineOperations(const eServiceReference &ref);
+
+	RESULT deleteFromDisk(int simulate);
+	RESULT getListOfFilenames(std::list<std::string> &);
+};
+
+DEFINE_REF(eMP3ServiceOfflineOperations);
+
+eMP3ServiceOfflineOperations::eMP3ServiceOfflineOperations(const eServiceReference &ref): m_ref(ref)
+{
+}
+
+/* Delete this service's file(s) from disk.
+   'simulate' nonzero means: only report whether deletion would succeed. */
+RESULT eMP3ServiceOfflineOperations::deleteFromDisk(int simulate)
+{
+	if (simulate)
+		return 0;
+	else
+	{
+		std::list<std::string> res;
+		if (getListOfFilenames(res))
+			return -1;
+
+		/* prefer the background eraser so deleting big files doesn't block */
+		eBackgroundFileEraser *eraser = eBackgroundFileEraser::getInstance();
+		if (!eraser)
+			eDebug("FATAL !! can't get background file eraser");
+
+		for (std::list<std::string>::iterator i(res.begin()); i != res.end(); ++i)
+		{
+			eDebug("Removing %s...", i->c_str());
+			if (eraser)
+				eraser->erase(i->c_str());
+			else
+				::unlink(i->c_str()); /* fall back to synchronous unlink */
+		}
+
+		return 0;
+	}
+}
+
+/* list the files belonging to this service: just the media file itself */
+RESULT eMP3ServiceOfflineOperations::getListOfFilenames(std::list<std::string> &res)
+{
+	res.clear();
+	res.push_back(m_ref.path);
+	return 0;
 }
+/* factory entry point: hand out offline operations for the given service */
+RESULT eServiceFactoryMP3::offlineOperations(const eServiceReference &ref, ePtr<iServiceOfflineOperations> &ptr)
+{
+	ptr = new eMP3ServiceOfflineOperations(ref);
+	return 0;
+}
+
// eStaticServiceMP3Info
eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eApp, 1)
{
m_stream_tags = 0;
+ m_audioStreams.clear();
+ m_subtitleStreams.clear();
m_currentAudioStream = 0;
+ m_currentSubtitleStream = 0;
+ m_subtitle_widget = 0;
+ m_currentTrickRatio = 0;
+ CONNECT(m_seekTimeout.timeout, eServiceMP3::seekTimeoutCB);
CONNECT(m_pump.recv_msg, eServiceMP3::gstPoll);
GstElement *source = 0;
if (!ext)
ext = filename;
- int is_mpeg_ps = !(strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin"));
+ int is_mpeg_ps = !(strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat"));
int is_mpeg_ts = !strcasecmp(ext, ".ts");
int is_matroska = !strcasecmp(ext, ".mkv");
int is_avi = !strcasecmp(ext, ".avi");
eDebug("filename: %s, is_mpeg_ps: %d, is_mpeg_ts: %d, is_video: %d, is_streaming: %d, is_mp3: %d, is_matroska: %d, is_avi: %d, is_AudioCD: %d", filename, is_mpeg_ps, is_mpeg_ts, is_video, is_streaming, is_mp3, is_matroska, is_avi, is_AudioCD);
int is_audio = !is_video;
-
- int all_ok = 0;
-// GError *blubb = NULL;
-// GstElement* m_gst_pipeline = gst_parse_launch("filesrc location=/media/hdd/movie/artehd_2lang.mkv ! matroskademux name=demux demux.audio_00 ! input-selector name=a demux.audio_01 ! a. a. ! queue ! dvbaudiosink", &blubb);
-// return;
+ int all_ok = 0;
m_gst_pipeline = gst_pipeline_new ("mediaplayer");
if (!m_gst_pipeline)
gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
gst_object_unref(audiopad);
gst_bin_add (GST_BIN(m_gst_pipeline), audio);
-
/* in mad's case, we can directly connect the decoder to the audiobin. otherwise, we do this in gstCBnewPad */
if (is_mp3)
gst_element_link(decoder, audio);
+ audioStream audioStreamElem;
+ m_audioStreams.push_back(audioStreamElem);
} else /* is_video */
{
+		/* probe for an accompanying .srt subtitle file: replace the media
+		   file's three-letter extension with "srt" and stat() the result */
+		std::string srt_filename(filename);
+		if (srt_filename.length() > 3)
+			srt_filename.replace(srt_filename.length() - 3, 3, "srt");
+		struct stat buffer;
+		if (srt_filename != filename && stat(srt_filename.c_str(), &buffer) == 0)
+		{
+			eDebug("subtitle file found: %s", srt_filename.c_str());
+			GstElement *subsource = gst_element_factory_make ("filesrc", "srt_source");
+			/* the filesrc must read the .srt file, not the media file itself */
+			g_object_set (G_OBJECT (subsource), "location", srt_filename.c_str(), NULL);
+			GstElement *parser = gst_element_factory_make("subparse", "srt_parse");
+			eDebug ("subparse = %p", parser);
+			GstElement *sink = gst_element_factory_make("fakesink", "srt_sink");
+			eDebug ("fakesink = %p", sink);
+			/* fakesink with signal-handoffs delivers each subtitle buffer
+			   to gstCBsubtitleAvail */
+			g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
+			gst_bin_add_many(GST_BIN (m_gst_pipeline), subsource, parser, sink, NULL);
+			gboolean res = gst_element_link(subsource, parser);
+			eDebug ("parser link = %d", res);
+			res = gst_element_link(parser, sink);
+			eDebug ("sink link = %d", res);
+			g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), this);
+			subtitleStream subs;
+			subs.element = sink;
+			m_subtitleStreams.push_back(subs);
+		}
+		else
+			eDebug("subtitle file not found: %s", srt_filename.c_str());
+
+ gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, NULL);
switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
- g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
- gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, switch_audio, audio, queue_audio, video, queue_video, NULL);
+ if (switch_audio)
+ {
+ g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
+ gst_bin_add(GST_BIN(m_gst_pipeline), switch_audio);
+ gst_element_link(switch_audio, queue_audio);
+ }
gst_element_link(source, videodemux);
- gst_element_link(switch_audio, queue_audio);
gst_element_link(queue_audio, audio);
gst_element_link(queue_video, video);
g_signal_connect(videodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
gst_object_unref(GST_OBJECT(queue_video));
if (videodemux)
gst_object_unref(GST_OBJECT(videodemux));
+ if (switch_audio)
+ gst_object_unref(GST_OBJECT(switch_audio));
eDebug("sorry, can't play.");
m_gst_pipeline = 0;
eServiceMP3::~eServiceMP3()
{
+ delete m_subtitle_widget;
if (m_state == stRunning)
stop();
RESULT eServiceMP3::setSlowMotion(int ratio)
{
+ /* we can't do slomo yet */
return -1;
}
 RESULT eServiceMP3::setFastForward(int ratio)
 {
-	return -1;
+	/* no native trick play: emulate it via periodic seeks (see seekTimeoutCB) */
+	m_currentTrickRatio = ratio;
+	if (ratio)
+		m_seekTimeout.start(1000, 0);
+	else
+		m_seekTimeout.stop();
+	return 0;
 }
-
+
+/* timer callback driving fast-forward/rewind emulation: once a second,
+   jump the play position by m_currentTrickRatio seconds (90000 pts/s). */
+void eServiceMP3::seekTimeoutCB()
+{
+	pts_t ppos, len;
+	getPlayPosition(ppos);
+	getLength(len);
+	ppos += 90000*m_currentTrickRatio;
+
+	if (ppos < 0)
+	{
+		/* hit the start while rewinding: clamp, stop trick mode,
+		   and still seek to position 0 below */
+		ppos = 0;
+		m_seekTimeout.stop();
+	}
+	if (ppos > len)
+	{
+		/* ran past the end while fast-forwarding: stop playback */
+		ppos = 0;
+		stop();
+		m_seekTimeout.stop();
+		return;
+	}
+	seekTo(ppos);
+}
+
// iPausableService
 RESULT eServiceMP3::pause()
 {
 	if (!m_gst_pipeline)
 		return -1;
-	gst_element_set_state(m_gst_pipeline, GST_STATE_PAUSED);
+	GstStateChangeReturn res = gst_element_set_state(m_gst_pipeline, GST_STATE_PAUSED);
+	if (res == GST_STATE_CHANGE_ASYNC)
+	{
+		/* state change still in progress: seek to the current position so
+		   the pipeline settles on the paused frame
+		   (NOTE(review): presumably a sink workaround — confirm) */
+		pts_t ppos;
+		getPlayPosition(ppos);
+		seekTo(ppos);
+	}
 	return 0;
 }
{
if (!m_gst_pipeline)
return -1;
- gst_element_set_state(m_gst_pipeline, GST_STATE_PLAYING);
+
+ GstStateChangeReturn res;
+ res = gst_element_set_state(m_gst_pipeline, GST_STATE_PLAYING);
return 0;
}
if (!m_gst_pipeline)
return -1;
- pause();
-
pts_t ppos;
getPlayPosition(ppos);
ppos += to * direction;
ppos = 0;
seekTo(ppos);
- unpause();
-
return 0;
}
RESULT eServiceMP3::setTrickmode(int trick)
{
- /* trickmode currently doesn't make any sense for us. */
+ /* trickmode is not yet supported by our dvbmediasinks. */
return -1;
}
return 0;
}
+/* expose this service as its own subtitle output implementation */
+RESULT eServiceMP3::subtitle(ePtr<iSubtitleOutput> &ptr)
+{
+	ptr = this;
+	return 0;
+}
+
int eServiceMP3::getNumberOfTracks()
{
- eDebug("eServiceMP3::getNumberOfTracks()=%i",m_audioStreams.size());
return m_audioStreams.size();
}
int eServiceMP3::getCurrentTrack()
{
- eDebug("eServiceMP3::getCurrentTrack()=%i",m_currentAudioStream);
return m_currentAudioStream;
}
RESULT eServiceMP3::selectTrack(unsigned int i)
{
- eDebug("eServiceMP3::selectTrack(%i)",i);
+ int ret = selectAudioStream(i);
+ /* flush */
+ pts_t ppos;
+ getPlayPosition(ppos);
+ seekTo(ppos);
- GstPadLinkReturn ret;
+ return ret;
+}
+
+/* switch the input-selector to audio stream i; returns 0 on success */
+int eServiceMP3::selectAudioStream(int i)
+{
 	gint nb_sources;
 	GstPad *active_pad;
-
 	GstElement *selector = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_audio");
+	if ( !selector)
+	{
+		eDebug("can't switch audio tracks! gst-plugin-selector needed");
+		return -1;
+	}
 	g_object_get (G_OBJECT (selector), "n-pads", &nb_sources, NULL);
-	g_object_get (G_OBJECT (selector), "active-pad", &active_pad, NULL);
-
-	if ( i >= m_audioStreams.size() || i >= nb_sources || m_currentAudioStream >= m_audioStreams.size() )
+	if ( (unsigned int)i >= m_audioStreams.size() || i >= nb_sources || (unsigned int)m_currentAudioStream >= m_audioStreams.size() )
+		/* NOTE(review): this early return leaks the 'selector' reference taken above */
 		return -2;
-
 	char sinkpad[8];
 	sprintf(sinkpad, "sink%d", i);
-
 	g_object_set (G_OBJECT (selector), "active-pad", gst_element_get_pad (selector, sinkpad), NULL);
 	g_object_get (G_OBJECT (selector), "active-pad", &active_pad, NULL);
-
 	gchar *name;
 	name = gst_pad_get_name (active_pad);
 	eDebug ("switched audio to (%s)", name);
-
+	g_free(name);
+	gst_object_unref (active_pad); /* g_object_get returned a new reference */
+	gst_object_unref (selector); /* gst_bin_get_by_name took a reference */
 	m_currentAudioStream = i;
 	return 0;
 }
info.m_description = "AC3";
else if (m_audioStreams[i].type == audioStream::atAAC)
info.m_description = "AAC";
- else if (m_audioStreams[i].type == audioStream::atDTS)
+ else if (m_audioStreams[i].type == audioStream::atDTS)
info.m_description = "DTS";
+ else if (m_audioStreams[i].type == audioStream::atPCM)
+ info.m_description = "PCM";
+ else if (m_audioStreams[i].type == audioStream::atOGG)
+ info.m_description = "OGG";
else
info.m_description = "???";
if (info.m_language.empty())
GstObject *source;
source = GST_MESSAGE_SRC(msg);
- sourceName = gst_object_get_name(source);
+ sourceName = gst_object_get_name(source);
if (gst_message_get_structure(msg))
{
gst_tag_list_free(m_stream_tags);
m_stream_tags = result;
}
-
gchar *g_audiocodec;
- if (gst_tag_list_get_string(m_stream_tags, GST_TAG_AUDIO_CODEC, &g_audiocodec))
+ if (gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_audiocodec) && m_audioStreams.size())
{
- static int a_str_cnt = 0;
+ std::vector<audioStream>::iterator IterAudioStream = m_audioStreams.begin();
+ while ( IterAudioStream != m_audioStreams.end() && (!IterAudioStream->language_code.empty() || IterAudioStream->type != audioStream::atUnknown))
+ ++IterAudioStream;
if ( g_strrstr(g_audiocodec, "MPEG-1 layer 2") )
- m_audioStreams[a_str_cnt].type = audioStream::atMP2;
+ IterAudioStream->type = audioStream::atMP2;
+ else if ( g_strrstr(g_audiocodec, "MPEG-1 layer 3") )
+ IterAudioStream->type = audioStream::atMP3;
+ else if ( g_strrstr(g_audiocodec, "AAC audio") ) // dont checked if correct
+ IterAudioStream->type = audioStream::atAAC;
+ else if ( g_strrstr(g_audiocodec, "DTS audio") )
+ IterAudioStream->type = audioStream::atDTS;
else if ( g_strrstr(g_audiocodec, "AC-3 audio") )
- m_audioStreams[a_str_cnt].type = audioStream::atAC3;
+ IterAudioStream->type = audioStream::atAC3;
+ else if ( g_strrstr(g_audiocodec, "Uncompressed 16-bit PCM audio") )
+ IterAudioStream->type = audioStream::atPCM;
+ else
+ eDebug("unknown audiocodec '%s'!", g_audiocodec);
gchar *g_language;
- if ( gst_tag_list_get_string(m_stream_tags, GST_TAG_LANGUAGE_CODE, &g_language) )
- {
- m_audioStreams[a_str_cnt].language_code = std::string(g_language);
- g_free (g_language);
- }
+ if ( gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+ IterAudioStream->language_code = std::string(g_language);
+ g_free (g_language);
g_free (g_audiocodec);
- a_str_cnt++;
}
break;
}
void eServiceMP3::gstCBpadAdded(GstElement *decodebin, GstPad *pad, gpointer user_data)
{
eServiceMP3 *_this = (eServiceMP3*)user_data;
+ GstBin *pipeline = GST_BIN(_this->m_gst_pipeline);
gchar *name;
name = gst_pad_get_name (pad);
eDebug ("A new pad %s was created", name);
if (g_strrstr(name,"audio")) // mpegdemux, matroskademux, avidemux use video_nn with n=0,1,.., flupsdemux uses stream id
{
+ GstElement *selector = gst_bin_get_by_name(pipeline , "switch_audio" );
audioStream audio;
audio.pad = pad;
_this->m_audioStreams.push_back(audio);
- GstElement *selector = gst_bin_get_by_name( GST_BIN(_this->m_gst_pipeline), "switch_audio" );
- GstPadLinkReturn ret = gst_pad_link(pad, gst_element_get_request_pad (selector, "sink%d"));
- if ( _this->m_audioStreams.size() == 1 )
- {
- _this->selectTrack(_this->m_audioStreams.size()-1);
- gst_element_set_state (_this->m_gst_pipeline, GST_STATE_PLAYING);
+ if ( selector )
+ {
+ gst_pad_link(pad, gst_element_get_request_pad (selector, "sink%d"));
+ if ( _this->m_audioStreams.size() == 1 )
+ {
+ _this->selectAudioStream(0);
+ gst_element_set_state (_this->m_gst_pipeline, GST_STATE_PLAYING);
+ }
+ else
+ g_object_set (G_OBJECT (selector), "select-all", FALSE, NULL);
}
else
- g_object_set (G_OBJECT (selector), "select-all", FALSE, NULL);
+ gst_pad_link(pad, gst_element_get_static_pad(gst_bin_get_by_name(pipeline,"queue_audio"), "sink"));
}
if (g_strrstr(name,"video"))
{
- GstElement *video = gst_bin_get_by_name(GST_BIN (_this->m_gst_pipeline),"queue_video");
- gst_pad_link(pad, gst_element_get_static_pad (video, "sink"));
+ gst_pad_link(pad, gst_element_get_static_pad(gst_bin_get_by_name(pipeline,"queue_video"), "sink"));
+ }
+ if (g_strrstr(name,"subtitle"))
+ {
+// GstCaps *caps;
+// const GstStructure *structure;
+// caps = gst_pad_get_caps(name);
+// structure = gst_caps_get_structure(caps, 0);
+ char elemname[17];
+ sprintf(elemname, "%s_pars", name);
+ GstElement *parser = gst_element_factory_make("ssaparse", elemname);
+ eDebug ("ssaparse %s = %p", elemname, parser);
+ sprintf(elemname, "%s_sink", name);
+ GstElement *sink = gst_element_factory_make("fakesink", elemname);
+ eDebug ("fakesink %s = %p", elemname, sink);
+ g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
+ gst_bin_add_many(pipeline, parser, sink, NULL);
+ gboolean res = gst_pad_link(pad, gst_element_get_static_pad(parser, "sink"));
+ eDebug ("parser link = %d", res);
+ res = gst_element_link(parser, sink);
+ eDebug ("sink link = %d", res);
+ g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), _this);
+ subtitleStream subs;
+ subs.element = sink;
+ _this->m_subtitleStreams.push_back(subs);
}
g_free (name);
}
#else
#warning gstreamer not available, not building media player
#endif
+
+/* "handoff" callback of the subtitle fakesinks: render the buffer's text
+   when it comes from the currently selected subtitle stream */
+void eServiceMP3::gstCBsubtitleAvail(GstElement *element, GstBuffer *buffer, GstPad *pad, gpointer user_data)
+{
+	eServiceMP3 *_this = (eServiceMP3*)user_data;
+	/* the buffer payload is not guaranteed to be NUL-terminated: copy it */
+	std::string text((const char*)GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE(buffer));
+	if ( _this->m_subtitle_widget
+		&& (unsigned int)_this->m_currentSubtitleStream < _this->m_subtitleStreams.size()
+		&& _this->m_subtitleStreams[_this->m_currentSubtitleStream].element == element )
+	{
+		eDVBTeletextSubtitlePage page;
+		gRGB rgbcol(0xD0,0xD0,0xD0);
+		page.m_elements.push_back(eDVBTeletextSubtitlePageElement(rgbcol, text.c_str()));
+		(_this->m_subtitle_widget)->setPage(page);
+	}
+	else
+	{
+		gchar *sourceName = gst_object_get_name(GST_OBJECT(element));
+		eDebug("on inactive element: %s (%p) saw subtitle: %s", sourceName, element, text.c_str());
+		g_free(sourceName); /* gst_object_get_name returns a copy we must free */
+	}
+}
+
+/* enable subtitle display; 'tuple' is expected to be (2, subtitle_stream_index) */
+RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
+{
+	eDebug("eServiceMP3::enableSubtitles");
+
+	ePyObject entry;
+	int tuplesize = 0;
+	int type = 0;
+	int pid = 0;
+
+	/* check the type before calling PyTuple_Size on it */
+	if (!PyTuple_Check(tuple))
+		goto error_out;
+	tuplesize = PyTuple_Size(tuple);
+	/* both items are read below, so require at least two entries */
+	if (tuplesize < 2)
+		goto error_out;
+
+	entry = PyTuple_GET_ITEM(tuple, 0);
+	if (!PyInt_Check(entry))
+		goto error_out;
+	type = PyInt_AsLong(entry);
+
+	entry = PyTuple_GET_ITEM(tuple, 1);
+	if (!PyInt_Check(entry))
+		goto error_out;
+	pid = PyInt_AsLong(entry);
+
+	/* don't leak a previously created widget when called twice */
+	delete m_subtitle_widget;
+	m_subtitle_widget = new eSubtitleWidget(parent);
+	m_subtitle_widget->resize(parent->size()); /* full size */
+	m_currentSubtitleStream = pid;
+
+	return 0;
+error_out:
+	eDebug("enableSubtitles needs a tuple as 2nd argument!\n"
+		"for gst subtitles (2, subtitle_stream_count)");
+	return -1;
+}
+
+/* tear down the subtitle widget; safe to call when none is active */
+RESULT eServiceMP3::disableSubtitles(eWidget *parent)
+{
+	eDebug("eServiceMP3::disableSubtitles");
+	delete m_subtitle_widget;
+	m_subtitle_widget = 0;
+	return 0;
+}
+
+PyObject *eServiceMP3::getCachedSubtitle()
+{
+	/* no subtitle caching for gstreamer services; the old debug string
+	   was a copy-paste from eDVBServicePlay */
+	eDebug("eServiceMP3::getCachedSubtitle");
+	Py_RETURN_NONE;
+}
+
+/* build the Python list of available subtitle streams;
+   entry format: (2, index, 0, 0, element_name) */
+PyObject *eServiceMP3::getSubtitleList()
+{
+	eDebug("eServiceMP3::getSubtitleList");
+
+	ePyObject l = PyList_New(0);
+	int stream_count = 0;
+
+	for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
+	{
+		ePyObject tuple = PyTuple_New(5);
+		PyTuple_SET_ITEM(tuple, 0, PyInt_FromLong(2));
+		PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(stream_count));
+		PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(0));
+		PyTuple_SET_ITEM(tuple, 3, PyInt_FromLong(0));
+		gchar *sourceName = gst_object_get_name(GST_OBJECT (IterSubtitleStream->element));
+		PyTuple_SET_ITEM(tuple, 4, PyString_FromString(sourceName));
+		g_free(sourceName); /* gst_object_get_name returns a copy we must free */
+		PyList_Append(l, tuple);
+		Py_DECREF(tuple);
+		stream_count++;
+	}
+
+	return l;
+}