Example #1
static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* media_type)
{
	TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder*) decoder;

	if (!mdecoder)
		return FALSE;

	DEBUG_TSMF("");

	switch (media_type->MajorType)
	{
		case TSMF_MAJOR_TYPE_VIDEO:
			mdecoder->media_type = TSMF_MAJOR_TYPE_VIDEO;
			break;
		case TSMF_MAJOR_TYPE_AUDIO:
			mdecoder->media_type = TSMF_MAJOR_TYPE_AUDIO;
			break;
		default:
			return FALSE;
	}

	switch (media_type->SubType)
	{
		case TSMF_SUB_TYPE_WVC1:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 3,
								 "format", G_TYPE_STRING, "WVC1",
								 NULL);
			break;
		case TSMF_SUB_TYPE_MP4S:
			mdecoder->gst_caps =  gst_caps_new_simple("video/x-divx",
								  "divxversion", G_TYPE_INT, 5,
								  "bitrate", G_TYPE_UINT, media_type->BitRate,
								  "width", G_TYPE_INT, media_type->Width,
								  "height", G_TYPE_INT, media_type->Height,
								  NULL);
			break;
		case TSMF_SUB_TYPE_MP42:
			mdecoder->gst_caps =  gst_caps_new_simple("video/x-msmpeg",
								  "msmpegversion", G_TYPE_INT, 42,
								  "bitrate", G_TYPE_UINT, media_type->BitRate,
								  "width", G_TYPE_INT, media_type->Width,
								  "height", G_TYPE_INT, media_type->Height,
								  NULL);
			break;
		case TSMF_SUB_TYPE_MP43:
			mdecoder->gst_caps =  gst_caps_new_simple("video/x-msmpeg",
								  "bitrate", G_TYPE_UINT, media_type->BitRate,
								  "width", G_TYPE_INT, media_type->Width,
								  "height", G_TYPE_INT, media_type->Height,
								  NULL);
			break;
		case TSMF_SUB_TYPE_WMA9:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/x-wma",
								  "wmaversion", G_TYPE_INT, 3,
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  "bitrate", G_TYPE_INT, media_type->BitRate,
								  "depth", G_TYPE_INT, media_type->BitsPerSample,
								  "width", G_TYPE_INT, media_type->BitsPerSample,
								  "block_align", G_TYPE_INT, media_type->BlockAlign,
								  NULL);
			break;
		case TSMF_SUB_TYPE_WMA2:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/x-wma",
								  "wmaversion", G_TYPE_INT, 2,
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  "bitrate", G_TYPE_INT, media_type->BitRate,
								  "depth", G_TYPE_INT, media_type->BitsPerSample,
								  "width", G_TYPE_INT, media_type->BitsPerSample,
								  "block_align", G_TYPE_INT, media_type->BlockAlign,
								  NULL);
			break;
		case TSMF_SUB_TYPE_MP3:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/mpeg",
								  "mpegversion", G_TYPE_INT, 1,
								  "layer", G_TYPE_INT, 3,
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  NULL);
			break;
		case TSMF_SUB_TYPE_WMV1:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 1,
								 NULL);
			break;
		case TSMF_SUB_TYPE_WMV2:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 2,
								 NULL);
			break;
		case TSMF_SUB_TYPE_WMV3:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
								 "bitrate", G_TYPE_UINT, media_type->BitRate,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "wmvversion", G_TYPE_INT, 3,
								 NULL);
			break;
		case TSMF_SUB_TYPE_AVC1:
		case TSMF_SUB_TYPE_H264:
			mdecoder->gst_caps = gst_caps_new_simple("video/x-h264",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
			break;
		case TSMF_SUB_TYPE_AC3:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/x-ac3",
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  NULL);
			break;
		case TSMF_SUB_TYPE_AAC:

			/* For AAC the pFormat is a HEAACWAVEINFO struct, and the codec data
			   is at the end of it. See
			   http://msdn.microsoft.com/en-us/library/dd757806.aspx */
			if (media_type->ExtraData)
			{
				media_type->ExtraData += 12;
				media_type->ExtraDataSize -= 12;
			}

			mdecoder->gst_caps = gst_caps_new_simple("audio/mpeg",
								 "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								 "channels", G_TYPE_INT, media_type->Channels,
								 "mpegversion", G_TYPE_INT, 4,
								 NULL);
			break;
		case TSMF_SUB_TYPE_MP1A:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/mpeg",
								  "mpegversion", G_TYPE_INT, 1,
								  "channels", G_TYPE_INT, media_type->Channels,
								  NULL);
			break;
		case TSMF_SUB_TYPE_MP1V:
			mdecoder->gst_caps = gst_caps_new_simple("video/mpeg",
								 "mpegversion", G_TYPE_INT, 1,
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 "systemstream", G_TYPE_BOOLEAN, FALSE,
								 NULL);
			break;
		case TSMF_SUB_TYPE_YUY2:
#if GST_VERSION_MAJOR > 0
			mdecoder->gst_caps = gst_caps_new_simple("video/x-raw",
								 "format", G_TYPE_STRING, "YUY2",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
#else
			mdecoder->gst_caps = gst_caps_new_simple("video/x-raw-yuv",
								 "format", G_TYPE_STRING, "YUY2",
								 "width", G_TYPE_INT, media_type->Width,
								 "height", G_TYPE_INT, media_type->Height,
								 NULL);
#endif
			break;
		case TSMF_SUB_TYPE_MP2V:
			mdecoder->gst_caps = gst_caps_new_simple("video/mpeg",
								 "mpegversion", G_TYPE_INT, 2,
								 "systemstream", G_TYPE_BOOLEAN, FALSE,
								 NULL);
			break;
		case TSMF_SUB_TYPE_MP2A:
			mdecoder->gst_caps =  gst_caps_new_simple("audio/mpeg",
								  "mpegversion", G_TYPE_INT, 2,
								  "rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
								  "channels", G_TYPE_INT, media_type->Channels,
								  NULL);
			break;
		default:
			WLog_ERR(TAG, "unknown format:(%d).", media_type->SubType);
			return FALSE;
	}

	if (media_type->ExtraDataSize > 0)
	{
		GstBuffer *buffer;
		DEBUG_TSMF("Extra data available (%d)", media_type->ExtraDataSize);
		buffer = tsmf_get_buffer_from_data(media_type->ExtraData, media_type->ExtraDataSize);

		if (!buffer)
		{
			WLog_ERR(TAG, "could not allocate GstBuffer!");
			return FALSE;
		}

		gst_caps_set_simple(mdecoder->gst_caps, "codec_data", GST_TYPE_BUFFER, buffer, NULL);
	}

	DEBUG_TSMF("%p format '%s'", mdecoder, gst_caps_to_string(mdecoder->gst_caps));
	tsmf_platform_set_format(mdecoder);

	/* Create the pipeline... */
	if (!tsmf_gstreamer_pipeline_build(mdecoder))
		return FALSE;

	return TRUE;
}
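
The example above only performs the format negotiation. As a rough illustration, a hypothetical caller inside the same plugin might describe an H.264 stream like this; the helper name and the resolution values are invented for the sketch, and only the TS_AM_MEDIA_TYPE fields and subtype constants already used above are assumed:

/* Hypothetical caller sketch -- not part of the original source. */
static BOOL example_configure_h264(ITSMFDecoder* decoder)
{
	TS_AM_MEDIA_TYPE mt = { 0 };

	mt.MajorType = TSMF_MAJOR_TYPE_VIDEO;
	mt.SubType = TSMF_SUB_TYPE_H264;
	mt.Width = 1280;        /* example values only */
	mt.Height = 720;
	mt.ExtraData = NULL;    /* no codec_data in this sketch */
	mt.ExtraDataSize = 0;

	/* Builds the caps and, on success, the GStreamer pipeline. */
	return tsmf_gstreamer_set_format(decoder, &mt);
}
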
Example #2
static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UINT32 data_size, UINT32 extensions,
									UINT64 start_time, UINT64 end_time, UINT64 duration)
{
	GstBuffer *gst_buf;
	TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
	UINT64 sample_time = tsmf_gstreamer_timestamp_ms_to_gst(start_time);
	UINT64 sample_duration = tsmf_gstreamer_timestamp_ms_to_gst(duration);

	if (!mdecoder)
	{
		WLog_ERR(TAG, "Decoder not initialized!");
		return FALSE;
	}

	/*
	 * This function is always called from a stream-specific thread.
	 * It should be alright to block here if necessary.
	 * We don't expect to block here often, since the pipeline should
	 * have more than enough buffering.
	 */
	DEBUG_TSMF("%s. Start:(%llu) End:(%llu) Duration:(%llu) Last End:(%llu)",
			   get_type(mdecoder), start_time, end_time, duration,
			   mdecoder->last_sample_end_time);

	if (mdecoder->gst_caps == NULL)
	{
		WLog_ERR(TAG, "tsmf_gstreamer_set_format not called or invalid format.");
		return FALSE;
	}

	if (!mdecoder->src)
	{
		WLog_ERR(TAG, "failed to construct pipeline correctly. Unable to push buffer to source element.");
		return FALSE;
	}

	gst_buf = tsmf_get_buffer_from_data(data, data_size);

	if (gst_buf == NULL)
	{
		WLog_ERR(TAG, "tsmf_get_buffer_from_data(%p, %d) failed.", data, data_size);
		return FALSE;
	}

	if (mdecoder->pipeline_start_time_valid)
	{
		long long diff = start_time;
		diff -= mdecoder->last_sample_end_time;

		if (diff < 0)
			diff *= -1;

		/* The pipe is initialized, but there is a discontinuity.
		 * Seek to the start position... */
		if (diff > 50)
		{
			DEBUG_TSMF("%s seeking to %lld", get_type(mdecoder), start_time);

			if (!gst_element_seek(mdecoder->pipe, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE,
								  GST_SEEK_TYPE_SET, sample_time,
								  GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE))
			{
				WLog_ERR(TAG, "seek failed");
			}

			mdecoder->pipeline_start_time_valid = 0;
		}
	}
	else
	{
		DEBUG_TSMF("%s start time %llu", get_type(mdecoder), sample_time);
		mdecoder->pipeline_start_time_valid = 1;
	}

#if GST_VERSION_MAJOR > 0
	GST_BUFFER_PTS(gst_buf) = sample_time;
#else
	GST_BUFFER_TIMESTAMP(gst_buf) = sample_time;
#endif
	GST_BUFFER_DURATION(gst_buf) = sample_duration;
	gst_app_src_push_buffer(GST_APP_SRC(mdecoder->src), gst_buf);

	if (mdecoder->ack_cb)
		mdecoder->ack_cb(mdecoder->stream, TRUE);

	mdecoder->last_sample_end_time = end_time;

	if (GST_STATE(mdecoder->pipe) != GST_STATE_PLAYING)
	{
		DEBUG_TSMF("%s: state=%s", get_type(mdecoder), gst_element_state_get_name(GST_STATE(mdecoder->pipe)));

		if (!mdecoder->paused && !mdecoder->shutdown && mdecoder->ready)
			tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
	}

	return TRUE;
}
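
Both decode variants, as well as the codec_data path in example #1, rely on tsmf_get_buffer_from_data(), which is not shown in these examples. A minimal sketch of what such a helper could look like on GStreamer 1.x, assuming it simply copies the sample so the appsrc owns the resulting buffer:

/* Sketch only -- the real helper is not included in these examples. */
static GstBuffer* tsmf_get_buffer_from_data(const void* raw_data, gsize size)
{
	gpointer data;

	if (!raw_data || (size < 1))
		return NULL;

	/* Copy the sample so its lifetime is independent of the caller's buffer. */
	data = g_malloc(size);
	memcpy(data, raw_data, size);

	/* gst_buffer_new_wrapped() takes ownership of the block and frees it with g_free(). */
	return gst_buffer_new_wrapped(data, size);
}
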
Example #3
static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UINT32 data_size, UINT32 extensions,
									UINT64 start_time, UINT64 end_time, UINT64 duration)
{
	GstBuffer *gst_buf;
	TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
	UINT64 sample_time = tsmf_gstreamer_timestamp_ms_to_gst(start_time);
	BOOL useTimestamps = TRUE;

	if (!mdecoder)
	{
		WLog_ERR(TAG, "Decoder not initialized!");
		return FALSE;
	}

	/*
	 * This function is always called from a stream-specific thread.
	 * It should be alright to block here if necessary.
	 * We don't expect to block here often, since the pipeline should
	 * have more than enough buffering.
	 */
	DEBUG_TSMF("%s. Start:(%"PRIu64") End:(%"PRIu64") Duration:(%"PRIu64") Last Start:(%"PRIu64")",
			   get_type(mdecoder), start_time, end_time, duration,
			   mdecoder->last_sample_start_time);

	if (mdecoder->shutdown)
	{
		WLog_ERR(TAG, "decodeEx called on shutdown decoder");
		return TRUE;
	}

	if (mdecoder->gst_caps == NULL)
	{
		WLog_ERR(TAG, "tsmf_gstreamer_set_format not called or invalid format.");
		return FALSE;
	}

	if (!mdecoder->pipe)
		tsmf_gstreamer_pipeline_build(mdecoder);

	if (!mdecoder->src)
	{
		WLog_ERR(TAG, "failed to construct pipeline correctly. Unable to push buffer to source element.");
		return FALSE;
	}

	gst_buf = tsmf_get_buffer_from_data(data, data_size);

	if (gst_buf == NULL)
	{
		WLog_ERR(TAG, "tsmf_get_buffer_from_data(%p, %"PRIu32") failed.", (void*) data, data_size);
		return FALSE;
	}

	/* Relative timestamps are sometimes set to 0,
	 * so we ignore these timestamps just to be safe (bit 8).
	 */
	if (extensions & 0x00000080)
	{
		DEBUG_TSMF("Ignoring the timestamps - relative - bit 8");
		useTimestamps = FALSE;
	}

	/* If no timestamps exist, then we don't want to look at the timestamp values (bit 7). */
	if (extensions & 0x00000040)
	{
		DEBUG_TSMF("Ignoring the timestamps - none - bit 7");
		useTimestamps = FALSE;
	}

	/* If performing a seek */
	if (mdecoder->seeking)
	{
		mdecoder->seeking = FALSE;
		tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PAUSED);
		mdecoder->pipeline_start_time_valid = 0;
	}

	if (mdecoder->pipeline_start_time_valid)
	{
		DEBUG_TSMF("%s start time %"PRIu64"", get_type(mdecoder), start_time);

		/* The seek condition is based on the start time only:
		 * WMV1 and WMV2 files in particular have bad end time and duration values,
		 * and there seem to be no real side effects to using just the start time instead.
		 */
		UINT64 minTime = mdecoder->last_sample_start_time - (UINT64) SEEK_TOLERANCE;
		UINT64 maxTime = mdecoder->last_sample_start_time + (UINT64) SEEK_TOLERANCE;

		/* Make sure minTime stops at 0 in case we are at the beginning of the stream. */
		if (mdecoder->last_sample_start_time < (UINT64) SEEK_TOLERANCE)
			minTime = 0;

		/* If the start_time is valid and different from the previous start time by more than the seek tolerance, then we have a seek condition */
		if (((start_time > maxTime) || (start_time < minTime)) && useTimestamps)
		{
			DEBUG_TSMF("tsmf_gstreamer_decodeEx: start_time=[%"PRIu64"] > last_sample_start_time=[%"PRIu64"] OR ", start_time, mdecoder->last_sample_start_time);
			DEBUG_TSMF("tsmf_gstreamer_decodeEx: start_time=[%"PRIu64"] < last_sample_start_time=[%"PRIu64"] with", start_time, mdecoder->last_sample_start_time);
			DEBUG_TSMF("tsmf_gstreamer_decodeEX: a tolerance of more than [%lu] from the last sample", SEEK_TOLERANCE);
			DEBUG_TSMF("tsmf_gstreamer_decodeEX: minTime=[%"PRIu64"] maxTime=[%"PRIu64"]", minTime, maxTime);

			mdecoder->seeking = TRUE;

			/* Since we can't make the GStreamer pipeline jump to the new start time after a seek,
			 * we just maintain an offset between real time and GStreamer time.
			 */
			mdecoder->seek_offset = start_time;
		}
	}
	else
	{
		DEBUG_TSMF("%s start time %"PRIu64"", get_type(mdecoder), start_time);
		/* Always set the base/start time to 0 and use the seek offset to translate real buffer times
		 * back to 0. This allows the video to be started from anywhere and lets seeks be handled
		 * without rebuilding the pipeline, which is costly.
		 */
		gst_element_set_base_time(mdecoder->pipe, tsmf_gstreamer_timestamp_ms_to_gst(0));
		gst_element_set_start_time(mdecoder->pipe, tsmf_gstreamer_timestamp_ms_to_gst(0));
		mdecoder->pipeline_start_time_valid = 1;

		/* Set the seek offset if buffer has valid timestamps. */
		if (useTimestamps)
			mdecoder->seek_offset = start_time;

		if (!gst_element_seek(mdecoder->pipe, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
						GST_SEEK_TYPE_SET, 0,
						GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE))
		{
			WLog_ERR(TAG, "seek failed");
		}
	}

#if GST_VERSION_MAJOR > 0
	if (useTimestamps)
		GST_BUFFER_PTS(gst_buf) = sample_time - tsmf_gstreamer_timestamp_ms_to_gst(mdecoder->seek_offset);
	else
		GST_BUFFER_PTS(gst_buf) = GST_CLOCK_TIME_NONE;
#else
	if (useTimestamps)
		GST_BUFFER_TIMESTAMP(gst_buf) = sample_time - tsmf_gstreamer_timestamp_ms_to_gst(mdecoder->seek_offset);
	else
		GST_BUFFER_TIMESTAMP(gst_buf) = GST_CLOCK_TIME_NONE;
#endif
	GST_BUFFER_DURATION(gst_buf) = GST_CLOCK_TIME_NONE;
	GST_BUFFER_OFFSET(gst_buf) = GST_BUFFER_OFFSET_NONE;
#if GST_VERSION_MAJOR > 0
#else
	gst_buffer_set_caps(gst_buf, mdecoder->gst_caps);
#endif
	gst_app_src_push_buffer(GST_APP_SRC(mdecoder->src), gst_buf);

	/* Should only update the last timestamps if the current ones are valid */
	if (useTimestamps)
	{
		mdecoder->last_sample_start_time = start_time;
		mdecoder->last_sample_end_time = end_time;
	}

	if (mdecoder->pipe && (GST_STATE(mdecoder->pipe) != GST_STATE_PLAYING))
	{
		DEBUG_TSMF("%s: state=%s", get_type(mdecoder), gst_element_state_get_name(GST_STATE(mdecoder->pipe)));	

		DEBUG_TSMF("%s Paused: %"PRIi32"   Shutdown: %i   Ready: %"PRIi32"", get_type(mdecoder), mdecoder->paused, mdecoder->shutdown, mdecoder->ready);
		if (!mdecoder->paused && !mdecoder->shutdown && mdecoder->ready)
			tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
	}

	return TRUE;
}
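
The seek handling in example #3 reduces to a tolerance window around the previous sample's start time. Pulled out into a stand-alone helper for clarity (hypothetical; the original code inlines this logic as shown above):

/* Hypothetical helper distilled from example #3 -- not part of the original source. */
static BOOL tsmf_sample_is_discontinuous(UINT64 start_time, UINT64 last_start_time, BOOL useTimestamps)
{
	UINT64 minTime, maxTime;

	/* Without usable timestamps there is nothing to compare against. */
	if (!useTimestamps)
		return FALSE;

	/* Window of last_start_time +/- SEEK_TOLERANCE, with the lower bound
	 * clamped at 0 near the beginning of the stream. */
	if (last_start_time < (UINT64) SEEK_TOLERANCE)
		minTime = 0;
	else
		minTime = last_start_time - (UINT64) SEEK_TOLERANCE;

	maxTime = last_start_time + (UINT64) SEEK_TOLERANCE;

	return (start_time < minTime) || (start_time > maxTime);
}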