Example #1
static void tsmf_sample_playback(TSMF_SAMPLE* sample)
{
	BOOL ret = FALSE;
	UINT32 width;
	UINT32 height;
	UINT32 pixfmt = 0;
	TSMF_STREAM* stream = sample->stream;

	if (stream->decoder)
	{
		if (stream->decoder->DecodeEx)
			ret = stream->decoder->DecodeEx(stream->decoder, sample->data, sample->data_size, sample->extensions,
					sample->start_time, sample->end_time, sample->duration);
		else
			ret = stream->decoder->Decode(stream->decoder, sample->data, sample->data_size, sample->extensions);
	}

	if (!ret)
	{
		tsmf_sample_ack(sample);
		tsmf_sample_free(sample);
		return;
	}

	free(sample->data);
	sample->data = NULL;

	if (stream->major_type == TSMF_MAJOR_TYPE_VIDEO)
	{
		if (stream->decoder->GetDecodedFormat)
		{
			pixfmt = stream->decoder->GetDecodedFormat(stream->decoder);
			if (pixfmt == ((UINT32) -1))
			{
				tsmf_sample_ack(sample);
				tsmf_sample_free(sample);
				return;
			}
			sample->pixfmt = pixfmt;
		}

		ret = FALSE;
		if (stream->decoder->GetDecodedDimension)
		{
			ret = stream->decoder->GetDecodedDimension(stream->decoder, &width, &height);
			if (ret && (width != stream->width || height != stream->height))
			{
				DEBUG_DVC("video dimension changed to %d x %d", width, height);
				stream->width = width;
				stream->height = height;
			}
		}
	}

	if (stream->decoder->GetDecodedData)
	{
		sample->data = stream->decoder->GetDecodedData(stream->decoder, &sample->decoded_size);
		switch (sample->stream->major_type)
		{
			case TSMF_MAJOR_TYPE_VIDEO:
				tsmf_sample_playback_video(sample);
				tsmf_sample_ack(sample);
				tsmf_sample_free(sample);
				break;
			case TSMF_MAJOR_TYPE_AUDIO:
				tsmf_sample_playback_audio(sample);
				tsmf_sample_queue_ack(sample);
				break;
		}
	}
	else
	{
		TSMF_STREAM* stream = sample->stream;
		UINT64 ack_anticipation_time = get_current_time();
		UINT64 currentRunningTime = sample->start_time;
		UINT32 bufferLevel = 0;
		if (stream->decoder->GetRunningTime)
		{
			currentRunningTime = stream->decoder->GetRunningTime(stream->decoder);
		}
		if (stream->decoder->BufferLevel)
		{
			bufferLevel = stream->decoder->BufferLevel(stream->decoder);
		}
		switch (sample->stream->major_type)
		{
			case TSMF_MAJOR_TYPE_VIDEO:
			{
				TSMF_PRESENTATION* presentation = sample->stream->presentation;
				/*
				 * Tell GStreamer that the presentation screen area has moved,
				 * so it can render to the new area.
				 */
				if (presentation->last_x != presentation->output_x || presentation->last_y != presentation->output_y ||
					presentation->last_width != presentation->output_width || presentation->last_height != presentation->output_height)
				{
					presentation->last_x = presentation->output_x;
					presentation->last_y = presentation->output_y;
					presentation->last_width = presentation->output_width;
					presentation->last_height = presentation->output_height;
					if (stream->decoder->UpdateRenderingArea)
					{
						stream->decoder->UpdateRenderingArea(stream->decoder, presentation->output_x, presentation->output_y,
							presentation->output_width, presentation->output_height, presentation->output_num_rects, presentation->output_rects);
					}
				}
				if ( presentation->last_num_rects != presentation->output_num_rects || (presentation->last_rects && presentation->output_rects &&
					memcmp(presentation->last_rects, presentation->output_rects, presentation->last_num_rects * sizeof(RDP_RECT)) != 0))
				{
					if (presentation->last_rects)
					{
						free(presentation->last_rects);
						presentation->last_rects = NULL;
					}

					presentation->last_num_rects = presentation->output_num_rects;

					if (presentation->last_num_rects > 0)
					{
						presentation->last_rects = malloc(presentation->last_num_rects * sizeof(RDP_RECT));
						if (presentation->last_rects)
						{
							/* memcpy fills the entire allocation, so a separate
							 * ZeroMemory is unnecessary; skip the copy if the
							 * allocation failed. */
							memcpy(presentation->last_rects, presentation->output_rects, presentation->last_num_rects * sizeof(RDP_RECT));
						}
					}
					if (stream->decoder->UpdateRenderingArea)
					{
						stream->decoder->UpdateRenderingArea(stream->decoder, presentation->output_x, presentation->output_y,
							presentation->output_width, presentation->output_height, presentation->output_num_rects, presentation->output_rects);
					}
				}

				if (bufferLevel < 24)
				{
					ack_anticipation_time += sample->duration;
				}
				else
				{
					if (currentRunningTime > sample->start_time)
					{
						ack_anticipation_time += sample->duration;
					}
					else if (currentRunningTime == 0)
					{
						ack_anticipation_time += sample->duration;
					}
					else
					{
						ack_anticipation_time += (sample->start_time - currentRunningTime);
					}
				}
				break;
			}
			case TSMF_MAJOR_TYPE_AUDIO:
			{
				last_played_audio_time = currentRunningTime;
				if (bufferLevel < 2)
				{
					ack_anticipation_time += sample->duration;
				}
				else
				{
					if (currentRunningTime > sample->start_time)
					{
						ack_anticipation_time += sample->duration;
					}
					else if (currentRunningTime == 0)
					{
						ack_anticipation_time += sample->duration;
					}
					else
					{
						ack_anticipation_time += (sample->start_time - currentRunningTime);
					}
				}
				break;
			}
		}
		sample->ack_time = ack_anticipation_time;
		tsmf_sample_queue_ack(sample);
	}
}
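
The ack-anticipation logic at the end of this variant reduces to a small rule: delay the ack by a full sample duration while the pipeline is under-filled or the playback clock is ahead (or unreported), otherwise only until the sample comes due. A minimal sketch of that rule as a standalone helper, assuming the hard-coded buffer thresholds (24 for video, 2 for audio) are passed in; the function name and signature are hypothetical:

static UINT64 anticipate_ack_time(UINT64 now, UINT64 start_time, UINT64 duration,
                                  UINT64 running_time, UINT32 buffer_level, UINT32 threshold)
{
	/* Under-filled pipeline: hold the ack for one full sample duration. */
	if (buffer_level < threshold)
		return now + duration;

	/* Playback already passed this sample, or no position is reported yet:
	 * also wait one sample duration. */
	if (running_time > start_time || running_time == 0)
		return now + duration;

	/* Playback clock is behind the sample: ack when the sample comes due. */
	return now + (start_time - running_time);
}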
Example #2
File: tsmf_media.c Project: artemh/FreeRDP
static BOOL tsmf_sample_playback(TSMF_SAMPLE* sample)
{
	BOOL ret = FALSE;
	UINT32 width;
	UINT32 height;
	UINT32 pixfmt = 0;
	TSMF_STREAM* stream = sample->stream;

	if (stream->decoder)
	{
		if (stream->decoder->DecodeEx)
		{
			/* Try to "sync" video buffers to audio buffers by looking at the
			 * running time of each stream. The difference between the two
			 * running times shows up as an offset between the actual audio
			 * and video render times, so we adjust the timestamps on the
			 * video buffer to match those on the audio buffer.
			 */
			if (stream->major_type == TSMF_MAJOR_TYPE_VIDEO)
			{
				TSMF_STREAM* temp_stream = NULL;
				TSMF_PRESENTATION* presentation = stream->presentation;
				ArrayList_Lock(presentation->stream_list);
				int count = ArrayList_Count(presentation->stream_list);
				int index = 0;
				for (index = 0; index < count; index++)
				{
					UINT64 time_diff;

					temp_stream = (TSMF_STREAM*) ArrayList_GetItem(presentation->stream_list, index);
					if (temp_stream->major_type == TSMF_MAJOR_TYPE_AUDIO)
					{
						UINT64 video_time = (UINT64) stream->decoder->GetRunningTime(stream->decoder);
						UINT64 audio_time = (UINT64) temp_stream->decoder->GetRunningTime(temp_stream->decoder);
						UINT64 max_adjust = VIDEO_ADJUST_MAX;

						/* Storing -VIDEO_ADJUST_MAX in a UINT64 relies on unsigned
						 * wraparound; adding the wrapped value to the timestamps
						 * below effectively subtracts VIDEO_ADJUST_MAX. */
						if (video_time < audio_time)
							max_adjust = -VIDEO_ADJUST_MAX;

						if (video_time > audio_time)
							time_diff = video_time - audio_time;
						else
							time_diff = audio_time - video_time;

						time_diff = time_diff < VIDEO_ADJUST_MAX ? time_diff : max_adjust;
						sample->start_time += time_diff;
						sample->end_time += time_diff;

						break;
					}
				}
				ArrayList_Unlock(presentation->stream_list);
			}

			ret = stream->decoder->DecodeEx(stream->decoder, sample->data, sample->data_size, sample->extensions,
						sample->start_time, sample->end_time, sample->duration);
		}
		else
		{
			ret = stream->decoder->Decode(stream->decoder, sample->data, sample->data_size, sample->extensions);
		}
	}

	if (!ret)
	{
		WLog_ERR(TAG, "decode error, queue ack anyways");
		if (!tsmf_sample_queue_ack(sample))
		{
			WLog_ERR(TAG, "error queuing sample for ack");
			return FALSE;
		}

		return TRUE;
	}

	free(sample->data);
	sample->data = NULL;

	if (stream->major_type == TSMF_MAJOR_TYPE_VIDEO)
	{
		if (stream->decoder->GetDecodedFormat)
		{
			pixfmt = stream->decoder->GetDecodedFormat(stream->decoder);

			if (pixfmt == ((UINT32) -1))
			{
				WLog_ERR(TAG, "unable to decode video format");
				if (!tsmf_sample_queue_ack(sample))
				{
					WLog_ERR(TAG, "error queuing sample for ack");
				}
				return FALSE;
			}

			sample->pixfmt = pixfmt;
		}

		if (stream->decoder->GetDecodedDimension)
		{
			ret = stream->decoder->GetDecodedDimension(stream->decoder, &width, &height);

			if (ret && (width != stream->width || height != stream->height))
			{
				DEBUG_TSMF("video dimension changed to %d x %d", width, height);
				stream->width = width;
				stream->height = height;
			}
		}
	}

	if (stream->decoder->GetDecodedData)
	{
		sample->data = stream->decoder->GetDecodedData(stream->decoder, &sample->decoded_size);

		switch (sample->stream->major_type)
		{
			case TSMF_MAJOR_TYPE_VIDEO:
				ret = tsmf_sample_playback_video(sample) &&
					tsmf_sample_queue_ack(sample);
				break;

			case TSMF_MAJOR_TYPE_AUDIO:
				ret = tsmf_sample_playback_audio(sample) &&
					tsmf_sample_queue_ack(sample);
				break;
		}
	}
	else
	{
		TSMF_STREAM* stream = sample->stream;
		UINT64 ack_anticipation_time = get_current_time();
		BOOL buffer_filled = TRUE;

		/* Classify the buffer as filled once it reaches the minimum level */
		if (stream->decoder->BufferLevel)
		{
			if (stream->currentBufferLevel < stream->minBufferLevel)
				buffer_filled = FALSE;
		}

		/* Note: both branches currently add the same delay (half the sample
		 * duration, capped at MAX_ACK_TIME), so buffer_filled has no effect
		 * on the ack time here. */
		if (buffer_filled)
		{
			ack_anticipation_time += (sample->duration / 2 < MAX_ACK_TIME) ? sample->duration / 2 : MAX_ACK_TIME;
		}
		else
		{
			ack_anticipation_time += (sample->duration / 2 < MAX_ACK_TIME) ? sample->duration / 2 : MAX_ACK_TIME;
		}

		/* No per-type handling is needed at this point; video and audio
		 * share the common ack path below. */
		switch (sample->stream->major_type)
		{
			case TSMF_MAJOR_TYPE_VIDEO:
				break;

			case TSMF_MAJOR_TYPE_AUDIO:
				break;
		}

		sample->ack_time = ack_anticipation_time;
		if (!tsmf_sample_queue_ack(sample))
		{
			WLog_ERR(TAG, "error queuing sample for ack");
			ret = FALSE;
		}
	}

	return ret;
}
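
The video-to-audio timestamp adjustment near the top of this variant is easier to audit in signed arithmetic. Below is a sketch of the same clamping rule under a hypothetical helper name; note the asymmetry it preserves from the original: below the clamp, the magnitude of the drift is added regardless of which clock is ahead, and only the clamped case carries a sign, which the original encodes by storing -VIDEO_ADJUST_MAX in a UINT64 and letting unsigned wraparound do the subtraction.

static INT64 video_sync_adjustment(UINT64 video_time, UINT64 audio_time)
{
	UINT64 diff = (video_time > audio_time) ? (video_time - audio_time)
	                                        : (audio_time - video_time);

	/* Small drift: the magnitude is applied as-is (always positive). */
	if (diff < VIDEO_ADJUST_MAX)
		return (INT64) diff;

	/* Large drift: clamp, signed by which clock is ahead. */
	return (video_time < audio_time) ? -((INT64) VIDEO_ADJUST_MAX) : (INT64) VIDEO_ADJUST_MAX;
}

Adding the result to sample->start_time and sample->end_time reproduces the behavior of the loop body above, since C converts the signed adjustment back to UINT64 on addition.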
Example #3
static void tsmf_sample_playback(TSMF_SAMPLE* sample)
{
	BOOL ret = FALSE;
	UINT32 width;
	UINT32 height;
	UINT32 pixfmt = 0;
	TSMF_STREAM* stream = sample->stream;

	if (stream->decoder)
	{
		if (stream->decoder->DecodeEx)
		{
			ret = stream->decoder->DecodeEx(stream->decoder, sample->data, sample->data_size, sample->extensions,
						sample->start_time, sample->end_time, sample->duration);
		}
		else
		{
			ret = stream->decoder->Decode(stream->decoder, sample->data, sample->data_size, sample->extensions);
		}
	}

	if (!ret)
	{
		tsmf_sample_queue_ack(sample);
		return;
	}

	free(sample->data);
	sample->data = NULL;

	if (stream->major_type == TSMF_MAJOR_TYPE_VIDEO)
	{
		if (stream->decoder->GetDecodedFormat)
		{
			pixfmt = stream->decoder->GetDecodedFormat(stream->decoder);

			if (pixfmt == ((UINT32) -1))
			{
				tsmf_sample_queue_ack(sample);
				return;
			}

			sample->pixfmt = pixfmt;
		}

		ret = FALSE;

		if (stream->decoder->GetDecodedDimension)
		{
			ret = stream->decoder->GetDecodedDimension(stream->decoder, &width, &height);

			if (ret && (width != stream->width || height != stream->height))
			{
				DEBUG_TSMF("video dimension changed to %d x %d", width, height);
				stream->width = width;
				stream->height = height;
			}
		}
	}

	if (stream->decoder->GetDecodedData)
	{
		sample->data = stream->decoder->GetDecodedData(stream->decoder, &sample->decoded_size);

		switch (sample->stream->major_type)
		{
			case TSMF_MAJOR_TYPE_VIDEO:
				tsmf_sample_playback_video(sample);
				tsmf_sample_queue_ack(sample);
				break;

			case TSMF_MAJOR_TYPE_AUDIO:
				tsmf_sample_playback_audio(sample);
				tsmf_sample_queue_ack(sample);
				break;
		}
	}
	else
	{
		TSMF_STREAM* stream = sample->stream;
		UINT64 ack_anticipation_time = get_current_time();
		UINT64 currentRunningTime = sample->start_time;
		BOOL buffer_filled = TRUE;

		if (stream->decoder->GetRunningTime)
		{
			currentRunningTime = stream->decoder->GetRunningTime(stream->decoder);
		}

		if (stream->decoder->BufferFilled)
		{
			buffer_filled = stream->decoder->BufferFilled(stream->decoder);
		}

		if (buffer_filled)
		{
			if (currentRunningTime > sample->start_time)
			{
				ack_anticipation_time += sample->duration;
			}
			else if (currentRunningTime == 0)
			{
				ack_anticipation_time += sample->duration;
			}
			else
			{
				ack_anticipation_time += (sample->start_time - currentRunningTime);
			}
		}
		else
		{
			ack_anticipation_time += sample->duration / 2;
		}

		/* No per-type handling is needed at this point; video and audio
		 * share the common ack path below. */
		switch (sample->stream->major_type)
		{
			case TSMF_MAJOR_TYPE_VIDEO:
				break;

			case TSMF_MAJOR_TYPE_AUDIO:
				break;
		}

		sample->ack_time = ack_anticipation_time;
		tsmf_sample_queue_ack(sample);
	}
}
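
All three variants lean on the same defensive convention: every decoder capability is an optional function pointer that is probed before each call. A reduced sketch of the interface shape implied by those call sites follows; the struct name is hypothetical and the parameter types are approximated from usage, so this is not the full FreeRDP ITSMFDecoder definition:

typedef struct _DECODER_SKETCH DECODER_SKETCH;

struct _DECODER_SKETCH
{
	/* Any member may be NULL; callers must test the pointer first. */
	BOOL (*Decode)(DECODER_SKETCH* d, const BYTE* data, UINT32 size, UINT32 extensions);
	BOOL (*DecodeEx)(DECODER_SKETCH* d, const BYTE* data, UINT32 size, UINT32 extensions,
	                 UINT64 start_time, UINT64 end_time, UINT64 duration);
	UINT32 (*GetDecodedFormat)(DECODER_SKETCH* d);
	BOOL (*GetDecodedDimension)(DECODER_SKETCH* d, UINT32* width, UINT32* height);
	BYTE* (*GetDecodedData)(DECODER_SKETCH* d, UINT32* size);
	UINT64 (*GetRunningTime)(DECODER_SKETCH* d);
	UINT32 (*BufferLevel)(DECODER_SKETCH* d);
	BOOL (*BufferFilled)(DECODER_SKETCH* d);
	void (*UpdateRenderingArea)(DECODER_SKETCH* d, int x, int y, int width, int height,
	                            int num_rects, RDP_RECT* rects);
};

This is why the guarded call form recurs throughout tsmf_sample_playback: a decoder backend only implements the subset of entry points it supports, and each call site degrades gracefully when one is absent.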