Example #1
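/* Ack and free the first sample on the stream's ack queue, either because its
 * ack time has been reached or unconditionally when force is TRUE. */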
static BOOL tsmf_stream_process_ack(void* arg, BOOL force)
{
	TSMF_STREAM* stream = arg;
	TSMF_SAMPLE* sample;
	UINT64 ack_time;
	BOOL rc = FALSE;

	if (!stream)
		return FALSE;

	Queue_Lock(stream->sample_ack_list);
	sample = (TSMF_SAMPLE*) Queue_Peek(stream->sample_ack_list);

	if (!sample)
		goto finally;

	if (!force)
	{
		ack_time = get_current_time();

		if (sample->ack_time > ack_time)
			goto finally;
	}

	sample = Queue_Dequeue(stream->sample_ack_list);
	tsmf_sample_ack(sample);
	tsmf_sample_free(sample);

finally:
	Queue_Unlock(stream->sample_ack_list);
	return rc;
}
Example #2
/* Returns TRUE if no more samples are currently available
 * Returns FALSE otherwise
 */
static BOOL tsmf_stream_process_ack(void* arg, BOOL force)
{
	TSMF_STREAM* stream = arg;
	TSMF_SAMPLE* sample;
	UINT64 ack_time;
	BOOL rc = FALSE;

	if (!stream)
		return TRUE;

	Queue_Lock(stream->sample_ack_list);
	sample = (TSMF_SAMPLE*) Queue_Peek(stream->sample_ack_list);

	if (!sample)
	{
		rc = TRUE;
		goto finally;
	}

	if (!force)
	{
		/* Do some min/max ack limiting if we have access to Buffer level information */
		if (stream->decoder && stream->decoder->BufferLevel)
		{
			/* Try to keep buffer level below max by withholding acks */
			if (stream->currentBufferLevel > stream->maxBufferLevel)
				goto finally;
			/* Try to keep buffer level above min by pushing acks through quickly */
			else if (stream->currentBufferLevel < stream->minBufferLevel)
				goto dequeue;
		}

		/* Time based acks only */
		ack_time = get_current_time();

		if (sample->ack_time > ack_time)
			goto finally;
	}

dequeue:
	sample = Queue_Dequeue(stream->sample_ack_list);

	if (sample)
	{
		tsmf_sample_ack(sample);
		tsmf_sample_free(sample);
	}

finally:
	Queue_Unlock(stream->sample_ack_list);
	return rc;
}
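A minimal usage sketch, not taken from the examples above: assuming a valid TSMF_STREAM* and the return semantics documented in Example #2 (TRUE means the ack queue is empty), a caller could drain the queue by polling. Sleep() is the WinPR/Win32 call; the 10 ms back-off is an arbitrary choice.

static void drain_ack_queue(TSMF_STREAM* stream)
{
	/* force = FALSE keeps the time- and buffer-level-based throttling active,
	 * so a withheld ack also returns FALSE and is simply retried later. */
	while (!tsmf_stream_process_ack(stream, FALSE))
		Sleep(10); /* arbitrary back-off interval */
}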
Example #3
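/* Ack and free every queued sample whose ack time has already passed, stopping
 * early if the stream's stop event is signalled. */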
static void tsmf_stream_process_ack(TSMF_STREAM* stream)
{
	TSMF_SAMPLE* sample;
	UINT64 ack_time;

	ack_time = get_current_time();

	while ((Queue_Count(stream->sample_ack_list) > 0) && !(WaitForSingleObject(stream->stopEvent, 0) == WAIT_OBJECT_0))
	{
		sample = (TSMF_SAMPLE*) Queue_Peek(stream->sample_ack_list);

		if (!sample || (sample->ack_time > ack_time))
			break;

		sample = Queue_Dequeue(stream->sample_ack_list);

		tsmf_sample_ack(sample);
		tsmf_sample_free(sample);
	}
}
Example #4
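/* Decode one sample. If the decoder exposes the decoded data, hand it to the
 * video or audio playback path; otherwise estimate when the sample should be
 * acknowledged (from buffer level and running time) and queue a delayed ack. */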
static void tsmf_sample_playback(TSMF_SAMPLE* sample)
{
	BOOL ret = FALSE;
	UINT32 width;
	UINT32 height;
	UINT32 pixfmt = 0;
	TSMF_STREAM* stream = sample->stream;

	if (stream->decoder)
	{
		if (stream->decoder->DecodeEx)
			ret = stream->decoder->DecodeEx(stream->decoder, sample->data, sample->data_size, sample->extensions,
					sample->start_time, sample->end_time, sample->duration);
		else
			ret = stream->decoder->Decode(stream->decoder, sample->data, sample->data_size, sample->extensions);
	}

	if (!ret)
	{
		tsmf_sample_ack(sample);
		tsmf_sample_free(sample);
		return;
	}

	free(sample->data);
	sample->data = NULL;

	if (stream->major_type == TSMF_MAJOR_TYPE_VIDEO)
	{
		if (stream->decoder->GetDecodedFormat)
		{
			pixfmt = stream->decoder->GetDecodedFormat(stream->decoder);
			if (pixfmt == ((UINT32) -1))
			{
				tsmf_sample_ack(sample);
				tsmf_sample_free(sample);
				return;
			}
			sample->pixfmt = pixfmt;
		}

		ret = FALSE;
		if (stream->decoder->GetDecodedDimension)
		{
			ret = stream->decoder->GetDecodedDimension(stream->decoder, &width, &height);
			if (ret && (width != stream->width || height != stream->height))
			{
				DEBUG_DVC("video dimension changed to %u x %u", width, height);
				stream->width = width;
				stream->height = height;
			}
		}
	}

	if (stream->decoder->GetDecodedData)
	{
		sample->data = stream->decoder->GetDecodedData(stream->decoder, &sample->decoded_size);
		switch (sample->stream->major_type)
		{
			case TSMF_MAJOR_TYPE_VIDEO:
				tsmf_sample_playback_video(sample);
				tsmf_sample_ack(sample);
				tsmf_sample_free(sample);
				break;
			case TSMF_MAJOR_TYPE_AUDIO:
				tsmf_sample_playback_audio(sample);
				tsmf_sample_queue_ack(sample);
				break;
		}
	}
	else
	{
		UINT64 ack_anticipation_time = get_current_time();
		UINT64 currentRunningTime = sample->start_time;
		UINT32 bufferLevel = 0;
		if (stream->decoder->GetRunningTime)
		{
			currentRunningTime = stream->decoder->GetRunningTime(stream->decoder);
		}
		if (stream->decoder->BufferLevel)
		{
			bufferLevel = stream->decoder->BufferLevel(stream->decoder);
		}
		switch (sample->stream->major_type)
		{
			case TSMF_MAJOR_TYPE_VIDEO:
			{
				TSMF_PRESENTATION * presentation = sample->stream->presentation;
				/*
				 *	Tell gstreamer that presentation screen area has moved.
				 *	So it can render on the new area.
				*/
				if (presentation->last_x != presentation->output_x || presentation->last_y != presentation->output_y ||
					presentation->last_width != presentation->output_width || presentation->last_height != presentation->output_height)
				{
					presentation->last_x = presentation->output_x;
					presentation->last_y = presentation->output_y;
					presentation->last_width = presentation->output_width;
					presentation->last_height = presentation->output_height;
					if (stream->decoder->UpdateRenderingArea)
					{
						stream->decoder->UpdateRenderingArea(stream->decoder, presentation->output_x, presentation->output_y,
						presentation->output_width, presentation->output_height, presentation->output_num_rects, presentation->output_rects);
					}
				}
				if ( presentation->last_num_rects != presentation->output_num_rects || (presentation->last_rects && presentation->output_rects &&
					memcmp(presentation->last_rects, presentation->output_rects, presentation->last_num_rects * sizeof(RDP_RECT)) != 0))
				{
					if (presentation->last_rects)
					{
						free(presentation->last_rects);
						presentation->last_rects = NULL;
					}

					presentation->last_num_rects = presentation->output_num_rects;

					if (presentation->last_num_rects > 0)
					{
						presentation->last_rects = calloc(presentation->last_num_rects, sizeof(RDP_RECT));

						if (presentation->last_rects)
							memcpy(presentation->last_rects, presentation->output_rects, presentation->last_num_rects * sizeof(RDP_RECT));
						else
							presentation->last_num_rects = 0;
					}
					if (stream->decoder->UpdateRenderingArea)
					{
						stream->decoder->UpdateRenderingArea(stream->decoder, presentation->output_x, presentation->output_y,
						presentation->output_width, presentation->output_height, presentation->output_num_rects, presentation->output_rects);
					}
				}

				if (bufferLevel < 24)
				{
					ack_anticipation_time += sample->duration;
				}
				else
				{
					if (currentRunningTime > sample->start_time)
					{
						ack_anticipation_time += sample->duration;
					}
					else if (currentRunningTime == 0)
					{
						ack_anticipation_time += sample->duration;
					}
					else
					{
						ack_anticipation_time += (sample->start_time - currentRunningTime);
					}
				}
				break;
			}
			case TSMF_MAJOR_TYPE_AUDIO:
			{
				last_played_audio_time = currentRunningTime;
				if (bufferLevel < 2)
				{
					ack_anticipation_time += sample->duration;
				}
				else
				{
					if (currentRunningTime > sample->start_time)
					{
						ack_anticipation_time += sample->duration;
					}
					else if (currentRunningTime == 0)
					{
						ack_anticipation_time += sample->duration;
					}
					else
					{
						ack_anticipation_time += (sample->start_time - currentRunningTime);
					}
				}
				break;
			}
		}
		sample->ack_time = ack_anticipation_time;
		tsmf_sample_queue_ack(sample);
	}
}
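The ack-delay rule in Example #4 appears twice, once for video (buffer threshold 24) and once for audio (threshold 2). As a sketch, and not part of the source, the shared logic could be factored into a helper such as the hypothetical compute_ack_anticipation below:

/* Hypothetical helper, not in the source: compute when a sample's ack should
 * be sent. Mirrors the branches above: a nearly empty buffer, playback already
 * past the sample's start, or a running time of zero delay the ack by one full
 * sample duration; otherwise the ack waits until playback reaches the sample. */
static UINT64 compute_ack_anticipation(UINT64 now, UINT64 start_time, UINT64 duration,
		UINT64 running_time, UINT32 buffer_level, UINT32 low_watermark)
{
	if (buffer_level < low_watermark)
		return now + duration;

	if ((running_time > start_time) || (running_time == 0))
		return now + duration;

	return now + (start_time - running_time);
}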