/**
 * GstAppSrc "need-data" callback: the pipeline wants more input.
 *
 * The TSMF stream thread pushes buffers on its own schedule, so nothing is
 * produced here; the event is only traced.
 *
 * Fix: @length is a guint (unsigned int) — the previous "%lu" specifier was a
 * printf type mismatch (undefined behavior on LP64 platforms); use "%u".
 */
static void tsmf_gstreamer_need_data(GstAppSrc *src, guint length, gpointer user_data)
{
	TSMFGstreamerDecoder* mdecoder = user_data;
	(void) mdecoder; /* only referenced when DEBUG_TSMF expands to a real log call */
	DEBUG_TSMF("%s length=%u", get_type(mdecoder), length);
}
/**
 * PulseAudio stream write-request callback.
 *
 * Invoked by the pa_threaded_mainloop when the server can accept @length more
 * bytes; wakes any thread blocked in pa_threaded_mainloop_wait() so it can
 * feed the stream.
 */
static void tsmf_pulse_stream_request_callback(pa_stream *stream, size_t length, void *userdata)
{
	TSMFPulseAudioDevice *dev = (TSMFPulseAudioDevice *) userdata;

	DEBUG_TSMF("%d", (int) length);

	/* wait_for_accept = 0: just signal, do not block for an accept */
	pa_threaded_mainloop_signal(dev->mainloop, 0);
}
/**
 * Handle a TSMF control message (Pause / Resume / Stop) for one decoder.
 *
 * Duplicate messages are ignored based on the decoder's paused/shutdown
 * flags; the GStreamer pipeline state and (for video) the platform window
 * are updated to match. @arg is unused by this backend.
 */
static void tsmf_gstreamer_control(ITSMFDecoder* decoder, ITSMFControlMsg control_msg, UINT32 *arg)
{
	TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;

	if (!mdecoder)
		return;

	if (control_msg == Control_Pause)
	{
		DEBUG_TSMF("Control_Pause %s", get_type(mdecoder));

		/* Ignore a duplicate pause. */
		if (mdecoder->paused)
		{
			WLog_ERR(TAG, "%s: Ignoring control PAUSE, already received!", get_type(mdecoder));
			return;
		}

		tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PAUSED);
		mdecoder->paused = TRUE;

		/* Video also hides/pauses the output window. */
		if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
			tsmf_window_pause(mdecoder);
	}
	else if (control_msg == Control_Resume)
	{
		DEBUG_TSMF("Control_Resume %s", get_type(mdecoder));

		/* Resume is only meaningful when paused or previously stopped;
		 * otherwise the pipeline is already playing. */
		if (!mdecoder->paused && !mdecoder->shutdown)
		{
			WLog_ERR(TAG, "%s: Ignoring control RESUME, already received!", get_type(mdecoder));
			return;
		}

		/* Clear both flags: a resume cancels a prior stop as well. */
		mdecoder->paused = FALSE;
		mdecoder->shutdown = FALSE;

		if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
			tsmf_window_resume(mdecoder);

		tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
	}
	else if (control_msg == Control_Stop)
	{
		DEBUG_TSMF("Control_Stop %s", get_type(mdecoder));

		/* Ignore a duplicate stop. */
		if (mdecoder->shutdown)
		{
			WLog_ERR(TAG, "%s: Ignoring control STOP, already received!", get_type(mdecoder));
			return;
		}

		mdecoder->shutdown = TRUE;
		/* Reset stamps, flush buffers, etc */
		tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PAUSED);

		if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
			tsmf_window_pause(mdecoder);

		/* Signal EOS so downstream elements drain cleanly. */
		gst_app_src_end_of_stream((GstAppSrc *)mdecoder->src);
	}
	else
		WLog_ERR(TAG, "Unknown control message %08x", control_msg);
}
/**
 * GstAppSrc "enough-data" callback: the source's internal queue is full.
 *
 * No throttling is done here — back-pressure is handled by the appsrc
 * "block" property — so the event is merely traced.
 */
static void tsmf_gstreamer_enough_data(GstAppSrc *src, gpointer user_data)
{
	TSMFGstreamerDecoder* decoder = user_data;

	(void) decoder; /* only referenced when DEBUG_TSMF expands */
	DEBUG_TSMF("%s", get_type(decoder));
}
/**
 * Push one encoded sample into the GStreamer pipeline.
 *
 * @param decoder    the TSMF decoder instance (must be initialized via set_format)
 * @param data       encoded sample payload (copied into a GstBuffer)
 * @param data_size  payload size in bytes
 * @param extensions TSMF sample extension flags (unused here)
 * @param start_time sample start timestamp (100ns units, converted below)
 * @param end_time   sample end timestamp
 * @param duration   sample duration
 * @return TRUE on success, FALSE on invalid state or allocation failure.
 *
 * On a timestamp discontinuity (> 50 units vs. the previous sample's end)
 * the pipeline is flush-seeked to the new position.
 */
static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UINT32 data_size, UINT32 extensions,
				    UINT64 start_time, UINT64 end_time, UINT64 duration)
{
	GstBuffer *gst_buf;
	TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
	UINT64 sample_time = tsmf_gstreamer_timestamp_ms_to_gst(start_time);
	UINT64 sample_duration = tsmf_gstreamer_timestamp_ms_to_gst(duration);

	if (!mdecoder)
	{
		WLog_ERR(TAG, "Decoder not initialized!");
		return FALSE;
	}

	/*
	 * This function is always called from a stream-specific thread.
	 * It should be alright to block here if necessary.
	 * We don't expect to block here often, since the pipeline should
	 * have more than enough buffering.
	 */
	DEBUG_TSMF("%s. Start:(%llu) End:(%llu) Duration:(%llu) Last End:(%llu)",
			   get_type(mdecoder), start_time, end_time, duration,
			   mdecoder->last_sample_end_time);

	/* set_format must have run first: it creates gst_caps and the pipeline. */
	if (mdecoder->gst_caps == NULL)
	{
		WLog_ERR(TAG, "tsmf_gstreamer_set_format not called or invalid format.");
		return FALSE;
	}

	if (!mdecoder->src)
	{
		WLog_ERR(TAG, "failed to construct pipeline correctly. Unable to push buffer to source element.");
		return FALSE;
	}

	/* Wrap the sample payload in a GstBuffer (copies the data). */
	gst_buf = tsmf_get_buffer_from_data(data, data_size);

	if (gst_buf == NULL)
	{
		WLog_ERR(TAG, "tsmf_get_buffer_from_data(%p, %d) failed.", data, data_size);
		return FALSE;
	}

	if (mdecoder->pipeline_start_time_valid)
	{
		/* Absolute gap between this sample's start and the previous sample's end. */
		long long diff = start_time;
		diff -= mdecoder->last_sample_end_time;

		if (diff < 0)
			diff *= -1;

		/* The pipe is initialized, but there is a discontinuity.
		 * Seek to the start position... */
		if (diff > 50)
		{
			DEBUG_TSMF("%s seeking to %lld", get_type(mdecoder), start_time);

			/* Flush-seek so stale buffered data is discarded. */
			if (!gst_element_seek(mdecoder->pipe, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE,
								  GST_SEEK_TYPE_SET, sample_time,
								  GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE))
			{
				WLog_ERR(TAG, "seek failed");
			}

			mdecoder->pipeline_start_time_valid = 0;
		}
	}
	else
	{
		DEBUG_TSMF("%s start time %llu", get_type(mdecoder), sample_time);
		mdecoder->pipeline_start_time_valid = 1;
	}

	/* GStreamer 1.x renamed the timestamp accessor. */
#if GST_VERSION_MAJOR > 0
	GST_BUFFER_PTS(gst_buf) = sample_time;
#else
	GST_BUFFER_TIMESTAMP(gst_buf) = sample_time;
#endif
	GST_BUFFER_DURATION(gst_buf) = sample_duration;

	/* push_buffer takes ownership of gst_buf. */
	gst_app_src_push_buffer(GST_APP_SRC(mdecoder->src), gst_buf);

	/* Acknowledge the sample back to the TSMF channel, if requested. */
	if (mdecoder->ack_cb)
		mdecoder->ack_cb(mdecoder->stream, TRUE);

	mdecoder->last_sample_end_time = end_time;

	/* Kick the pipeline to PLAYING once it is ready and not paused/stopped. */
	if (GST_STATE(mdecoder->pipe) != GST_STATE_PLAYING)
	{
		DEBUG_TSMF("%s: state=%s", get_type(mdecoder), gst_element_state_get_name(GST_STATE(mdecoder->pipe)));

		if (!mdecoder->paused && !mdecoder->shutdown && mdecoder->ready)
			tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
	}

	return TRUE;
}
/**
 * Build the GStreamer pipeline for one TSMF stream.
 *
 * Constructs "appsrc ! decodebin ! <convert chain> ! <platform sink>" via
 * gst_parse_launch, resolves the named elements, configures the appsrc and
 * starts the pipeline.
 *
 * @return TRUE on success; FALSE on failure. Fix: on failure after the
 * pipeline was created, all acquired references are now released and the
 * decoder's element pointers reset — previously the pipeline and the refs
 * returned by gst_bin_get_by_name (ref-transferring) were leaked.
 */
BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
{
	const char* appsrc = "appsrc name=source ! decodebin name=decoder !";
	const char* video = "autovideoconvert ! videoscale !";
	const char* audio = "audioconvert ! audiorate ! audioresample ! volume name=audiovolume !";
	char pipeline[1024];

	if (!mdecoder)
		return FALSE;

	/* TODO: Construction of the pipeline from a string allows easy overwrite with arguments.
	 * The only fixed elements necessary are appsrc and the volume element for audio streams.
	 * The rest could easily be provided in gstreamer pipeline notation from command line. */
	if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
		snprintf(pipeline, sizeof(pipeline), "%s %s %s name=outsink", appsrc, video, tsmf_platform_get_video_sink());
	else
		snprintf(pipeline, sizeof(pipeline), "%s %s %s name=outsink", appsrc, audio, tsmf_platform_get_audio_sink());

	DEBUG_TSMF("pipeline=%s", pipeline);
	mdecoder->pipe = gst_parse_launch(pipeline, NULL);

	if (!mdecoder->pipe)
	{
		WLog_ERR(TAG, "Failed to create new pipe");
		return FALSE;
	}

	/* gst_bin_get_by_name returns a new reference; it must be released on failure. */
	mdecoder->src = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "source");

	if (!mdecoder->src)
	{
		WLog_ERR(TAG, "Failed to get appsrc");
		goto fail_pipe;
	}

	mdecoder->outsink = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "outsink");

	if (!mdecoder->outsink)
	{
		WLog_ERR(TAG, "Failed to get sink");
		goto fail_src;
	}

	if (mdecoder->media_type != TSMF_MAJOR_TYPE_VIDEO)
	{
		/* Audio streams need the named volume element for volume control. */
		mdecoder->volume = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audiovolume");

		if (!mdecoder->volume)
		{
			WLog_ERR(TAG, "Failed to get volume");
			goto fail_outsink;
		}
	}

	tsmf_platform_register_handler(mdecoder);

	/* AppSrc settings: live, blocking (back-pressure), time-based format. */
	GstAppSrcCallbacks callbacks = { tsmf_gstreamer_need_data, tsmf_gstreamer_enough_data, tsmf_gstreamer_seek_data };
	g_object_set(mdecoder->src, "format", GST_FORMAT_TIME, NULL);
	g_object_set(mdecoder->src, "is-live", TRUE, NULL);
	g_object_set(mdecoder->src, "block", TRUE, NULL);
	gst_app_src_set_caps((GstAppSrc *) mdecoder->src, mdecoder->gst_caps);
	gst_app_src_set_callbacks((GstAppSrc *) mdecoder->src, &callbacks, mdecoder, NULL);
	gst_app_src_set_stream_type((GstAppSrc *) mdecoder->src, GST_APP_STREAM_TYPE_SEEKABLE);
	tsmf_window_create(mdecoder);
	tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_READY);
	tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
	mdecoder->pipeline_start_time_valid = 0;
	mdecoder->shutdown = 0;
	GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(mdecoder->pipe), GST_DEBUG_GRAPH_SHOW_ALL, get_type(mdecoder));
	return TRUE;

	/* Ordered cleanup: each label releases what was acquired before its jump. */
fail_outsink:
	gst_object_unref(mdecoder->outsink);
	mdecoder->outsink = NULL;
fail_src:
	gst_object_unref(mdecoder->src);
	mdecoder->src = NULL;
fail_pipe:
	gst_object_unref(mdecoder->pipe);
	mdecoder->pipe = NULL;
	return FALSE;
}
/**
 * Translate a TSMF media type into GstCaps and build the pipeline.
 *
 * @param decoder    the TSMF decoder instance
 * @param media_type TSMF format description (subtype, dimensions, rates,
 *                   optional ExtraData codec configuration). For AAC the
 *                   ExtraData pointer/size fields are adjusted in place to
 *                   skip the HEAACWAVEINFO header.
 * @return TRUE on success, FALSE on unknown/invalid format or build failure.
 *
 * Fixes: AAC ExtraDataSize is validated before subtracting the 12-byte
 * header (previously a UINT32 underflow on short data); the string from
 * gst_caps_to_string() is freed (caller-owned per GStreamer docs); the
 * codec_data GstBuffer reference is dropped after gst_caps_set_simple()
 * takes its own ref; MP43 caps now carry msmpegversion=43 consistent with
 * the MP42 case.
 */
static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* media_type)
{
	TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder*) decoder;

	if (!mdecoder)
		return FALSE;

	DEBUG_TSMF("");

	switch (media_type->MajorType)
	{
		case TSMF_MAJOR_TYPE_VIDEO:
			mdecoder->media_type = TSMF_MAJOR_TYPE_VIDEO;
			break;

		case TSMF_MAJOR_TYPE_AUDIO:
			mdecoder->media_type = TSMF_MAJOR_TYPE_AUDIO;
			break;

		default:
			return FALSE;
	}

	switch (media_type->SubType)
	{
		case TSMF_SUB_TYPE_WVC1:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 3,
								 "format", G_TYPE_STRING, "WVC1",
								 NULL);
			break;

		case TSMF_SUB_TYPE_MP4S:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-divx",
								 "divxversion", G_TYPE_INT, 5,
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
			break;

		case TSMF_SUB_TYPE_MP42:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-msmpeg",
								 "msmpegversion", G_TYPE_INT, 42,
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
			break;

		case TSMF_SUB_TYPE_MP43:
			/* msmpegversion added for parity with MP42; without it decoder
			 * selection may pick the wrong msmpeg variant. */
			mdecoder->gst_caps = gst_caps_new_simple("video/x-msmpeg",
								 "msmpegversion", G_TYPE_INT, 43,
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
			break;

		case TSMF_SUB_TYPE_WMA9:
			mdecoder->gst_caps = gst_caps_new_simple("audio/x-wma",
								 "wmaversion", G_TYPE_INT, 3,
								 "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								 "channels", G_TYPE_INT, media_type->Channels,
								 "bitrate", G_TYPE_INT, media_type->BitRate,
								 "depth", G_TYPE_INT, media_type->BitsPerSample,
								 "width", G_TYPE_INT, media_type->BitsPerSample,
								 "block_align", G_TYPE_INT, media_type->BlockAlign,
								 NULL);
			break;

		case TSMF_SUB_TYPE_WMA2:
			mdecoder->gst_caps = gst_caps_new_simple("audio/x-wma",
								 "wmaversion", G_TYPE_INT, 2,
								 "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								 "channels", G_TYPE_INT, media_type->Channels,
								 "bitrate", G_TYPE_INT, media_type->BitRate,
								 "depth", G_TYPE_INT, media_type->BitsPerSample,
								 "width", G_TYPE_INT, media_type->BitsPerSample,
								 "block_align", G_TYPE_INT, media_type->BlockAlign,
								 NULL);
			break;

		case TSMF_SUB_TYPE_MP3:
			mdecoder->gst_caps = gst_caps_new_simple("audio/mpeg",
								 "mpegversion", G_TYPE_INT, 1,
								 "layer", G_TYPE_INT, 3,
								 "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								 "channels", G_TYPE_INT, media_type->Channels,
								 NULL);
			break;

		case TSMF_SUB_TYPE_WMV1:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 1,
								 NULL);
			break;

		case TSMF_SUB_TYPE_WMV2:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 2,
								 NULL);
			break;

		case TSMF_SUB_TYPE_WMV3:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 3,
								 NULL);
			break;

		case TSMF_SUB_TYPE_AVC1:
		case TSMF_SUB_TYPE_H264:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-h264",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
			break;

		case TSMF_SUB_TYPE_AC3:
			mdecoder->gst_caps = gst_caps_new_simple("audio/x-ac3",
								 "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								 "channels", G_TYPE_INT, media_type->Channels,
								 NULL);
			break;

		case TSMF_SUB_TYPE_AAC:

			/* For AAC the pFormat is a HEAACWAVEINFO struct, and the codec data
			 * is at the end of it. See http://msdn.microsoft.com/en-us/library/dd757806.aspx */
			if (media_type->ExtraData)
			{
				/* Guard against UINT32 underflow on truncated ExtraData. */
				if (media_type->ExtraDataSize < 12)
				{
					WLog_ERR(TAG, "AAC ExtraData too small for HEAACWAVEINFO header");
					return FALSE;
				}

				media_type->ExtraData += 12;
				media_type->ExtraDataSize -= 12;
			}

			mdecoder->gst_caps = gst_caps_new_simple("audio/mpeg",
								 "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								 "channels", G_TYPE_INT, media_type->Channels,
								 "mpegversion", G_TYPE_INT, 4,
								 NULL);
			break;

		case TSMF_SUB_TYPE_MP1A:
			mdecoder->gst_caps = gst_caps_new_simple("audio/mpeg",
								 "mpegversion", G_TYPE_INT, 1,
								 "channels", G_TYPE_INT, media_type->Channels,
								 NULL);
			break;

		case TSMF_SUB_TYPE_MP1V:
			mdecoder->gst_caps = gst_caps_new_simple("video/mpeg",
								 "mpegversion", G_TYPE_INT, 1,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "systemstream", G_TYPE_BOOLEAN, FALSE,
								 NULL);
			break;

		case TSMF_SUB_TYPE_YUY2:
#if GST_VERSION_MAJOR > 0
			mdecoder->gst_caps = gst_caps_new_simple("video/x-raw",
								 "format", G_TYPE_STRING, "YUY2",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
#else
			/* NOTE(review): GStreamer 0.10 raw-yuv caps conventionally use a
			 * fourcc "format" field; the string form is kept as-is here —
			 * confirm against the 0.10 build if that path is still used. */
			mdecoder->gst_caps = gst_caps_new_simple("video/x-raw-yuv",
								 "format", G_TYPE_STRING, "YUY2",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
#endif
			break;

		case TSMF_SUB_TYPE_MP2V:
			mdecoder->gst_caps = gst_caps_new_simple("video/mpeg",
								 "mpegversion", G_TYPE_INT, 2,
								 "systemstream", G_TYPE_BOOLEAN, FALSE,
								 NULL);
			break;

		case TSMF_SUB_TYPE_MP2A:
			mdecoder->gst_caps = gst_caps_new_simple("audio/mpeg",
								 "mpegversion", G_TYPE_INT, 2,
								 "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								 "channels", G_TYPE_INT, media_type->Channels,
								 NULL);
			break;

		default:
			WLog_ERR(TAG, "unknown format:(%d).", media_type->SubType);
			return FALSE;
	}

	if (media_type->ExtraDataSize > 0)
	{
		GstBuffer *buffer;
		DEBUG_TSMF("Extra data available (%d)", media_type->ExtraDataSize);
		buffer = tsmf_get_buffer_from_data(media_type->ExtraData, media_type->ExtraDataSize);

		if (!buffer)
		{
			WLog_ERR(TAG, "could not allocate GstBuffer!");
			return FALSE;
		}

		gst_caps_set_simple(mdecoder->gst_caps, "codec_data", GST_TYPE_BUFFER, buffer, NULL);
		/* The caps hold their own reference to the buffer; drop ours. */
		gst_buffer_unref(buffer);
	}

	{
		/* gst_caps_to_string returns a caller-owned string; free it. */
		gchar *caps_str = gst_caps_to_string(mdecoder->gst_caps);
		DEBUG_TSMF("%p format '%s'", mdecoder, caps_str);
		g_free(caps_str);
	}

	tsmf_platform_set_format(mdecoder);

	/* Create the pipeline... */
	if (!tsmf_gstreamer_pipeline_build(mdecoder))
		return FALSE;

	return TRUE;
}