Example #1
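A unit test for the WinPR wQueue: ten integers are enqueued, Queue_Count is printed, FIFO order is verified on dequeue, and the count is printed again before the queue is cleared and freed.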
int TestQueue(int argc, char* argv[])
{
	int item;
	int index;
	int count;
	wQueue* queue;

	queue = Queue_New(TRUE, -1, -1);
	if (!queue)
		return -1;

	for (index = 1; index <= 10; index++)
	{
		Queue_Enqueue(queue, (void*) (size_t) index);
	}

	count = Queue_Count(queue);
	printf("queue count: %d\n", count);

	for (index = 1; index <= 10; index++)
	{
		item = (int) (size_t) Queue_Dequeue(queue);

		if (item != index)
		{
			Queue_Free(queue); /* don't leak the queue on a failed check */
			return -1;
		}
	}

	count = Queue_Count(queue);
	printf("queue count: %d\n", count);

	Queue_Enqueue(queue, (void*) (size_t) 1);
	Queue_Enqueue(queue, (void*) (size_t) 2);
	Queue_Enqueue(queue, (void*) (size_t) 3);

	Queue_Dequeue(queue);
	Queue_Dequeue(queue);

	Queue_Enqueue(queue, (void*) (size_t) 4);
	Queue_Enqueue(queue, (void*) (size_t) 5);
	Queue_Enqueue(queue, (void*) (size_t) 6);

	Queue_Dequeue(queue);
	Queue_Dequeue(queue);
	Queue_Dequeue(queue);
	Queue_Dequeue(queue);

	Queue_Clear(queue);
	Queue_Free(queue);

	return 0;
}
Example #2
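Teardown of an Android OpenSL ES capture stream: while Queue_Count reports pending elements, each is dequeued and its data freed before the queue, the in-flight buffers, the engine, and the stream itself are released.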
// close the android audio device
void android_CloseRecDevice(OPENSL_STREAM *p)
{
	DEBUG_DVC("p=%p", p);

	if (p == NULL)
		return;

	if (p->queue)
	{
		while (Queue_Count(p->queue) > 0)
		{
			queue_element *e = Queue_Dequeue(p->queue);
			free(e->data);
			free(e);
		}
		Queue_Free(p->queue);
	}

	if (p->next)
	{
		free(p->next->data);
		free(p->next);
	}

	if (p->prep)
	{
		free(p->prep->data);
		free(p->prep);
	}

	openSLDestroyEngine(p);

	free(p);
}
Example #3
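Playback path of the same Android backend: Queue_Count guards against exceeding the configured queue size (blocking on the queue's event), then a copy of the sample buffer is tracked in the wQueue and handed to the OpenSL ES player buffer queue.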
// puts a buffer of size samples to the device
int android_AudioOut(OPENSL_STREAM* p, const short* buffer, int size)
{
	assert(p);
	assert(buffer);
	assert(size > 0);

	/* Ensure that the queue is not full. */
	if (p->queuesize <= Queue_Count(p->queue)
	    && WaitForSingleObject(p->queue->event, INFINITE) == WAIT_FAILED)
	{
		DEBUG_SND("WaitForSingleObject failed!");
		return -1;
	}

	void* data = calloc(size, sizeof(short));

	if (!data)
	{
		DEBUG_SND("unable to allocate a buffer");
		return -1;
	}

	memcpy(data, buffer, size * sizeof(short));
	Queue_Enqueue(p->queue, data);
	(*p->bqPlayerBufferQueue)->Enqueue(p->bqPlayerBufferQueue,
	                                   data, sizeof(short) * size);
	return size;
}
Example #4
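Capture path: the first call primes the OpenSL ES recorder with two buffers and starts recording; afterwards Queue_Count decides whether the caller must block until a recorded element becomes available.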
// gets a buffer of size samples from the device
int android_RecIn(OPENSL_STREAM* p, short* buffer, int size)
{
	queue_element *e;
	int rc;

	assert(p);
	assert(buffer);
	assert(size > 0);

	/* Initial trigger for the queue. */
	if (!p->prep)
	{
		p->prep = calloc(1, sizeof(queue_element));
		p->prep->data = calloc(p->buffersize, p->bits_per_sample / 8);
		p->prep->size = p->buffersize * p->bits_per_sample / 8;

		p->next = calloc(1, sizeof(queue_element));
		p->next->data = calloc(p->buffersize, p->bits_per_sample / 8);
		p->next->size = p->buffersize * p->bits_per_sample / 8;

		(*p->recorderBufferQueue)->Enqueue(p->recorderBufferQueue,
				p->next->data, p->next->size);
		(*p->recorderBufferQueue)->Enqueue(p->recorderBufferQueue,
				p->prep->data, p->prep->size);

		(*p->recorderRecord)->SetRecordState(p->recorderRecord, SL_RECORDSTATE_RECORDING);
	}

	/* Wait for queue to be filled... */
	if (!Queue_Count(p->queue))
		WaitForSingleObject(p->queue->event, INFINITE);

	e = Queue_Dequeue(p->queue);
	if (!e)
	{
		WLog_ERR(TAG, "[ERROR] got e=%p from queue", e);
		return -1;
	}

	rc = (e->size < size) ? e->size : size;
	assert(size == e->size);
	assert(p->buffersize * p->bits_per_sample / 8 == size);

	memcpy(buffer, e->data, rc);
	free(e->data);
	free(e);

	return rc;
}
Example #5
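iOS rdpsnd plugin: once enough frames have been rendered for the wave at the head of waveQ, that item is dequeued and a wave-confirm PDU is sent with the measured latency; Queue_Count here only feeds the debug output.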
void rdpsnd_count_frames(rdpsndIOSPlugin* p)
{
	int targetFrames;
	waveItem* peek;
	
	peek = Queue_Peek(p->waveQ);
	if (!peek)
	{
		printf("empty waveQ!\n");
		return;
	}
	
	targetFrames = peek->numFrames;
	//printf("count %d/%d frames\n", p->frameCnt, targetFrames);
	
	if (p->frameCnt >= targetFrames)
	{
		UINT16 tB;
		UINT16 diff;
		
		tB = (UINT16)GetTickCount();
		diff = tB - peek->localTimeStampA;
		
		//frameCnt = frameCnt - peek->numFrames;
		p->frameCnt = 0;
		
		peek = Queue_Dequeue(p->waveQ);
		
		rdpsnd_send_wave_confirm_pdu(p->device.rdpsnd, peek->remoteTimeStampA + diff, peek->ID);
		//printf("confirm with latency:%d\n", diff);
		
		printf("\tConfirm %02X timeStamp A:%d B:%d diff %d (qCount=%d)\n"
		       , peek->ID,
		       peek->remoteTimeStampA,
		       peek->remoteTimeStampA + diff,
		       diff,
		       Queue_Count(p->waveQ));
		
		free(peek);
	}
}
Example #6
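TSMF acknowledgement loop: while the ack list is non-empty and the stop event is unsignaled, samples whose ack time has passed are dequeued, acknowledged, and freed.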
static void tsmf_stream_process_ack(TSMF_STREAM* stream)
{
	TSMF_SAMPLE* sample;
	UINT64 ack_time;

	ack_time = get_current_time();

	while ((Queue_Count(stream->sample_ack_list) > 0) &&
	       (WaitForSingleObject(stream->stopEvent, 0) != WAIT_OBJECT_0))
	{
		sample = (TSMF_SAMPLE*) Queue_Peek(stream->sample_ack_list);

		if (!sample || (sample->ack_time > ack_time))
			break;

		sample = Queue_Dequeue(stream->sample_ack_list);

		tsmf_sample_ack(sample);
		tsmf_sample_free(sample);
	}
}
Example #7
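Destructor for the video channel's private context: queued frames are drained under framesLock before the queue, the lock, the current presentation reference, and the surface buffer pool are released.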
static void VideoClientContextPriv_free(VideoClientContextPriv *priv)
{
	EnterCriticalSection(&priv->framesLock);
	while (Queue_Count(priv->frames))
	{
		VideoFrame *frame = Queue_Dequeue(priv->frames);
		if (frame)
			VideoFrame_free(&frame);
	}

	Queue_Free(priv->frames);
	LeaveCriticalSection(&priv->framesLock);

	DeleteCriticalSection(&priv->framesLock);

	if (priv->currentPresentation)
		PresentationContext_unref(priv->currentPresentation);

	BufferPool_Free(priv->surfacePool);
	free(priv);
}
Example #8
File: tsmf_media.c Project: artemh/FreeRDP
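TSMF sample scheduling: Queue_Count gates an early return on an empty sample list; in sync mode an audio sample is held back while another stream's last start time lags behind by more than AUDIO_TOLERANCE.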
static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
{
	UINT32 index;
	UINT32 count;
	TSMF_STREAM *s;
	TSMF_SAMPLE* sample;
	BOOL pending = FALSE;
	TSMF_PRESENTATION* presentation;

	/* check the argument before dereferencing it */
	if (!stream)
		return NULL;

	presentation = stream->presentation;

	if (Queue_Count(stream->sample_list) < 1)
		return NULL;

	if (sync)
	{
		if (stream->decoder)
		{
			if (stream->decoder->GetDecodedData)
			{
				if (stream->major_type == TSMF_MAJOR_TYPE_AUDIO)
				{
					/* Check if some other stream has earlier sample that needs to be played first */
					/* Start time is more reliable than end time as some stream types seem to have incorrect
					 * end times from the server
					 */
					if (stream->last_start_time > AUDIO_TOLERANCE)
					{
						ArrayList_Lock(presentation->stream_list);
						count = ArrayList_Count(presentation->stream_list);

						for (index = 0; index < count; index++)
						{
							s = (TSMF_STREAM *) ArrayList_GetItem(presentation->stream_list, index);

							/* Start time is more reliable than end time as some stream types seem to have incorrect
							 * end times from the server
							 */
							if (s != stream && !s->eos && s->last_start_time &&
								s->last_start_time < stream->last_start_time - AUDIO_TOLERANCE)
							{
								DEBUG_TSMF("Pending due to audio tolerance");
								pending = TRUE;
								break;
							}
						}

						ArrayList_Unlock(presentation->stream_list);
					}
				}
				else
				{
					/* Start time is more reliable than end time as some stream types seem to have incorrect
					 * end times from the server
					 */
					if (stream->last_start_time > presentation->audio_start_time)
					{
						DEBUG_TSMF("Pending due to stream start time > audio start time");
						pending = TRUE;
					}
				}
			}
		}
	}

	if (pending)
		return NULL;

	sample = (TSMF_SAMPLE *) Queue_Dequeue(stream->sample_list);

	/* Only update stream last end time if the sample end time is valid and greater than the current stream end time */
	if (sample && (sample->end_time > stream->last_end_time) && (!sample->invalidTimestamps))
		stream->last_end_time = sample->end_time;

	/* Only update stream last start time if the sample start time is valid and greater than the current stream start time */
	if (sample && (sample->start_time > stream->last_start_time) && (!sample->invalidTimestamps))
		stream->last_start_time = sample->start_time;

	return sample;
}
Example #9
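Core Audio render callback of the iOS plugin: it feeds the AU buffers from a circular buffer; on underrun, Queue_Count reveals waves still awaiting confirmation so rdpsnd_count_frames can flush them before playback is stopped.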
//This callback is used to feed the AU buffers
static OSStatus rdpsnd_ios_render_cb(
				     void *inRefCon,
				     AudioUnitRenderActionFlags *ioActionFlags,
				     const AudioTimeStamp *inTimeStamp,
				     UInt32 inBusNumber,
				     UInt32 inNumberFrames,
				     AudioBufferList *ioData
				     )
{
	unsigned int i;
	
	if (inBusNumber != 0)
	{
		return noErr;
	}
	
	rdpsndIOSPlugin *p = THIS(inRefCon);
	
	//printf("Playing %d frames... ", (unsigned int)inNumberFrames);
	
	//pthread_mutex_lock(&p->bMutex);
	for (i = 0; i < ioData->mNumberBuffers; i++)
	{
		//printf("buf%d ", i);
		/*printf("buf size = %d (%lums) ",
		 (unsigned int)ioData->mBuffers[i].mDataByteSize,
		 (ioData->mBuffers[i].mDataByteSize * 1000) / p->bpsAvg);
		 */
		AudioBuffer* target_buffer = &ioData->mBuffers[i];
		
		int32_t available_bytes = 0;
		const void *buffer = TPCircularBufferTail(&p->buffer, &available_bytes);
		if (buffer != NULL && available_bytes > 0)
		{
			const int bytes_to_copy = MIN((int32_t)target_buffer->mDataByteSize, available_bytes);
			
			memcpy(target_buffer->mData, buffer, bytes_to_copy);
			target_buffer->mDataByteSize = bytes_to_copy;
			
			TPCircularBufferConsume(&p->buffer, bytes_to_copy);
			
			p->frameCnt += inNumberFrames;
		}
		else
		{
			*ioActionFlags = *ioActionFlags | kAudioUnitRenderAction_OutputIsSilence;
			
			//FIXME: force sending of any remaining items in queue
			if (Queue_Count(p->waveQ) > 0)
			{
				p->frameCnt += 1000000;
			}
			
			//in case we didn't get a post render callback first (observed)
			rdpsnd_count_frames(p);

			target_buffer->mDataByteSize = 0;
			AudioOutputUnitStop(p->audio_unit);
			//p->is_playing = 0;
			rdpsnd_set_isPlaying(p, FALSE);
			
			printf("Buffer is empty with frameCnt:%d(uderrun)\n", p->frameCnt);
		}
	}
	//pthread_mutex_unlock(&p->bMutex);
	
	return noErr;
}
Example #10
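A variant of tsmf_stream_pop_sample from what appears to be an older FreeRDP tree: the stream list is a linked list of LIST_ITEMs guarded by a presentation mutex, and the pending check is keyed on last_end_time.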
static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
{
	TSMF_STREAM* s;
	LIST_ITEM* item;
	TSMF_SAMPLE* sample;
	BOOL pending = FALSE;
	TSMF_PRESENTATION* presentation = stream->presentation;

	if (Queue_Count(stream->sample_list) < 1)
		return NULL;

	if (sync)
	{
		if (stream->decoder)
		{
			if (stream->decoder->GetDecodedData)
			{
				if (stream->major_type == TSMF_MAJOR_TYPE_AUDIO)
				{
					/* Check if some other stream has earlier sample that needs to be played first */
					if (stream->last_end_time > AUDIO_TOLERANCE)
					{
						WaitForSingleObject(presentation->mutex, INFINITE);

						for (item = presentation->stream_list->head; item; item = item->next)
						{
							s = (TSMF_STREAM*) item->data;

							if (s != stream && !s->eos && s->last_end_time &&
								s->last_end_time < stream->last_end_time - AUDIO_TOLERANCE)
							{
								pending = TRUE;
								break;
							}
						}

						ReleaseMutex(presentation->mutex);
					}
				}
				else
				{
					if (stream->last_end_time > presentation->audio_end_time)
					{
						pending = TRUE;
					}
				}

			}
		}
	}

	if (pending)
		return NULL;

	sample = (TSMF_SAMPLE*) Queue_Dequeue(stream->sample_list);

	if (sample && (sample->end_time > stream->last_end_time))
		stream->last_end_time = sample->end_time;

	return sample;
}
Example #11
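Another variant of the same function: the stream list is now an ArrayList with Lock/Unlock, but the comparison is still keyed on last_end_time rather than last_start_time as in Example #8.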
static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
{
	UINT32 index;
	UINT32 count;
	TSMF_STREAM *s;
	TSMF_SAMPLE* sample;
	BOOL pending = FALSE;
	TSMF_PRESENTATION* presentation;

	/* check the argument before dereferencing it */
	if (!stream)
		return NULL;

	presentation = stream->presentation;

	if (Queue_Count(stream->sample_list) < 1)
		return NULL;

	if (sync)
	{
		if (stream->decoder)
		{
			if (stream->decoder->GetDecodedData)
			{
				if (stream->major_type == TSMF_MAJOR_TYPE_AUDIO)
				{
					/* Check if some other stream has earlier sample that needs to be played first */
					if (stream->last_end_time > AUDIO_TOLERANCE)
					{
						ArrayList_Lock(presentation->stream_list);
						count = ArrayList_Count(presentation->stream_list);

						for (index = 0; index < count; index++)
						{
							s = (TSMF_STREAM *) ArrayList_GetItem(presentation->stream_list, index);

							if (s != stream && !s->eos && s->last_end_time &&
									s->last_end_time < stream->last_end_time - AUDIO_TOLERANCE)
							{
								pending = TRUE;
								break;
							}
						}

						ArrayList_Unlock(presentation->stream_list);
					}
				}
				else
				{
					if (stream->last_end_time > presentation->audio_end_time)
					{
						pending = TRUE;
					}
				}
			}
		}
	}

	if (pending)
		return NULL;

	sample = (TSMF_SAMPLE *) Queue_Dequeue(stream->sample_list);

	if (sample && (sample->end_time > stream->last_end_time))
		stream->last_end_time = sample->end_time;

	return sample;
}
Example #12
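Video channel data handler: once a sample is complete it is H.264-decoded and either shown immediately, dropping any frames still counted in priv->frames, or wrapped in a VideoFrame and enqueued for deferred presentation.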
static UINT video_VideoData(VideoClientContext* context, TSMM_VIDEO_DATA *data)
{
	VideoClientContextPriv *priv = context->priv;
	PresentationContext *presentation;
	int status;

	presentation = priv->currentPresentation;
	if (!presentation)
	{
		WLog_ERR(TAG, "no current presentation");
		return CHANNEL_RC_OK;
	}

	if (presentation->PresentationId != data->PresentationId)
	{
		WLog_ERR(TAG, "current presentation id=%d doesn't match data id=%d", presentation->PresentationId,
				data->PresentationId);
		return CHANNEL_RC_OK;
	}

	if (!Stream_EnsureRemainingCapacity(presentation->currentSample, data->cbSample))
	{
		WLog_ERR(TAG, "unable to expand the current packet");
		return CHANNEL_RC_NO_MEMORY;
	}

	Stream_Write(presentation->currentSample, data->pSample, data->cbSample);

	if (data->CurrentPacketIndex == data->PacketsInSample)
	{
		H264_CONTEXT *h264 = presentation->h264;
		UINT64 startTime = GetTickCount64(), timeAfterH264;
		MAPPED_GEOMETRY *geom = presentation->geometry;

		Stream_SealLength(presentation->currentSample);
		Stream_SetPosition(presentation->currentSample, 0);

		status = h264->subsystem->Decompress(h264, Stream_Pointer(presentation->currentSample),
				Stream_Length(presentation->currentSample));
		/* nothing decoded or decode error: skip this sample */
		if (status <= 0)
			return CHANNEL_RC_OK;

		timeAfterH264 = GetTickCount64();
		if (data->SampleNumber == 1)
		{
			presentation->lastPublishTime = startTime;
		}

		presentation->lastPublishTime += (data->hnsDuration / 10000);
		if (presentation->lastPublishTime <= timeAfterH264 + 10)
		{
			int dropped = 0;

			/* if the frame is to be published in less than 10 ms, let's consider it's now */
			yuv_to_rgb(presentation, presentation->surfaceData);

			context->showSurface(context, presentation->surface);

			priv->publishedFrames++;

			/* cleanup previously scheduled frames */
			EnterCriticalSection(&priv->framesLock);
			while (Queue_Count(priv->frames) > 0)
			{
				VideoFrame *frame = Queue_Dequeue(priv->frames);
				if (frame)
				{
					priv->droppedFrames++;
					VideoFrame_free(&frame);
					dropped++;
				}
			}
			LeaveCriticalSection(&priv->framesLock);

			if (dropped)
				WLog_DBG(TAG, "showing frame (%d dropped)", dropped);
		}
		else
		{
			BOOL enqueueResult;
			VideoFrame *frame = calloc(1, sizeof(*frame));
			if (!frame)
			{
				WLog_ERR(TAG, "unable to create frame");
				return CHANNEL_RC_NO_MEMORY;
			}
			mappedGeometryRef(geom);

			frame->presentation = presentation;
			frame->publishTime = presentation->lastPublishTime;
			frame->geometry = geom;
			frame->w = presentation->SourceWidth;
			frame->h = presentation->SourceHeight;

			frame->surfaceData = BufferPool_Take(priv->surfacePool, frame->w * frame->h * 4);
			if (!frame->surfaceData)
			{
				WLog_ERR(TAG, "unable to allocate frame data");
				mappedGeometryUnref(geom);
				free(frame);
				return CHANNEL_RC_NO_MEMORY;
			}

			if (!yuv_to_rgb(presentation, frame->surfaceData))
			{
				WLog_ERR(TAG, "error during YUV->RGB conversion");
				BufferPool_Return(priv->surfacePool, frame->surfaceData);
				mappedGeometryUnref(geom);
				free(frame);
				return CHANNEL_RC_NO_MEMORY;
			}

			InterlockedIncrement(&presentation->refCounter);

			EnterCriticalSection(&priv->framesLock);
			enqueueResult = Queue_Enqueue(priv->frames, frame);
			LeaveCriticalSection(&priv->framesLock);

			if (!enqueueResult)
			{
				WLog_ERR(TAG, "unable to enqueue frame");
				VideoFrame_free(&frame);
				return CHANNEL_RC_NO_MEMORY;
			}

			WLog_DBG(TAG, "scheduling frame in %"PRIu32" ms", (frame->publishTime-startTime));
		}
	}

	return CHANNEL_RC_OK;
}