Example 1
void* audio_exec(void* udata) {
  float buffer_time_us = (float)(1e6 * NUM_SAMPLES) / SAMPLE_FREQ;
  while(1) {
    /* get a buffer */
    OMX_BUFFERHEADERTYPE *hdr;
    while((hdr = ilclient_get_input_buffer(audio_render, 100, 0)) == NULL) {
      usleep(buffer_time_us / 4); // wait 1/4 of the time to drain a buffer
    }

    // fill the buffer
    audio_fill_buffer((int16_t*)hdr->pBuffer, NUM_SAMPLES * 2);
    hdr->nOffset = 0;
    hdr->nFilledLen = scaled_buffer_size(NUM_SAMPLES);

    // submit the buffer
    OMX_ERRORTYPE error;
    error = OMX_EmptyThisBuffer(ILC_GET_HANDLE(audio_render), hdr);
    assert(error == OMX_ErrorNone);

    // drive down the latency to a buffer's length or less
    while(audio_get_latency() > NUM_SAMPLES) {
      usleep(buffer_time_us / 2);
    }
  }
}
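The generator behind audio_fill_buffer() is not shown on this page; Example 1 (and Example 4 below) only assume it writes the requested number of interleaved 16-bit samples. A minimal sketch of such a callee, reusing the example's SAMPLE_FREQ constant; the test tone, TONE_HZ, AMPLITUDE and the static phase state are illustrative assumptions, not code from the original project:

/* Hypothetical sketch of the audio_fill_buffer() callee assumed by Example 1
 * (and Example 4): fills 'count' interleaved stereo int16 samples with a
 * test tone. TONE_HZ, AMPLITUDE and the static 'phase' are illustrative. */
#include <math.h>
#include <stddef.h>
#include <stdint.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define TONE_HZ   440.0
#define AMPLITUDE 3000.0

static double phase = 0.0;

void audio_fill_buffer(int16_t *dst, size_t count)
{
  for (size_t i = 0; i + 1 < count; i += 2) {
    int16_t s = (int16_t)(AMPLITUDE * sin(phase));
    dst[i]     = s; /* left */
    dst[i + 1] = s; /* right */
    phase += 2.0 * M_PI * TONE_HZ / SAMPLE_FREQ;
    if (phase > 2.0 * M_PI)
      phase -= 2.0 * M_PI;
  }
}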
Example 2
/*
 * audio record callback
 * args:
 *   s - pointer to pa_stream
 *   length - buffer length
 *   data - pointer to user data
 *
 * asserts:
 *   none
 *
 * returns: none
 */
static void stream_request_cb(pa_stream *s, size_t length, void *data)
{

	audio_context_t *audio_ctx = (audio_context_t *) data;

	if(audio_ctx->channels == 0)
	{
		fprintf(stderr, "AUDIO: (pulseaudio) stream_request_cb failed: channels = 0\n");
		return;
	}
	
	if(audio_ctx->samprate == 0)
	{
		fprintf(stderr, "AUDIO: (pulseaudio) stream_request_cb failed: samprate = 0\n");
		return;
	}
	
	uint64_t frame_length = NSEC_PER_SEC / audio_ctx->samprate; /*in nanosec*/
	int64_t ts = 0;
	int64_t buff_ts = 0;
	uint32_t i = 0;

	while (pa_stream_readable_size(s) > 0)
	{
		const void *inputBuffer;
		size_t length;

		/*read from stream*/
		if (pa_stream_peek(s, &inputBuffer, &length) < 0)
		{
			fprintf(stderr, "AUDIO: (pulseaudio) pa_stream_peek() failed\n");
			return;
		}

		if(length == 0)
		{
			fprintf(stderr, "AUDIO: (pulseaudio) empty buffer!\n");
			return; /*buffer is empty*/
		}

		get_latency(s);

		ts = ns_time_monotonic() - (latency * 1000);

		if(audio_ctx->last_ts <= 0)
			audio_ctx->last_ts = ts;


		uint32_t numSamples = (uint32_t) length / sizeof(sample_t);

		const sample_t *rptr = (const sample_t*) inputBuffer;
		sample_t *capture_buff = (sample_t *) audio_ctx->capture_buff;

		int chan = 0;
		/*store capture samples or silence if inputBuffer == NULL (hole)*/
		for( i = 0; i < numSamples; ++i )
		{
			capture_buff[sample_index] = inputBuffer ? *rptr++ : 0;

			/*store peak value of the sample just written*/
			if(audio_ctx->capture_buff_level[chan] < capture_buff[sample_index])
				audio_ctx->capture_buff_level[chan] = capture_buff[sample_index];

			sample_index++;
			chan++;
			if(chan >= audio_ctx->channels)
				chan = 0;

			if(sample_index >= audio_ctx->capture_buff_size)
			{
				buff_ts = ts + ( i / audio_ctx->channels ) * frame_length;

				audio_fill_buffer(audio_ctx, buff_ts);

				/*reset*/
				audio_ctx->capture_buff_level[0] = 0;
				audio_ctx->capture_buff_level[1] = 0;
				sample_index = 0;
			}
		}

		pa_stream_drop(s); /*clean the samples*/
	}

}
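The callback above depends on a get_latency() helper and a file-scope latency variable that are defined elsewhere in the project. A plausible sketch, assuming the helper simply caches the stream's current record latency in microseconds via pa_stream_get_latency(); the variable name, its type and the error handling here are assumptions:

/* Sketch of the get_latency() helper assumed above: caches the current
 * record latency (microseconds) of the stream in a file-scope variable. */
#include <pulse/pulseaudio.h>

static pa_usec_t latency = 0;

static void get_latency(pa_stream *s)
{
	pa_usec_t l = 0;
	int negative = 0;

	/* fails (e.g. with PA_ERR_NODATA) until stream timing info is available */
	if (pa_stream_get_latency(s, &l, &negative) < 0)
		return;

	latency = negative ? 0 : l;
}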
Example 3
/*
 * Portaudio record callback
 * args:
 *    inputBuffer - pointer to captured input data (for recording)
 *    outputBuffer - pointer to output data (for playback - NOT USED)
 *    framesPerBuffer - number of frames in the buffer
 *    timeInfo - pointer to time data (for timestamping)
 *    statusFlags - stream status
 *    userData - pointer to user data (audio context)
 *
 * asserts:
 *    audio_ctx (userData) is not null
 *
 * returns: error code (0 ok)
 */
static int recordCallback (
	const void *inputBuffer,
	void *outputBuffer,
	unsigned long framesPerBuffer,
	const PaStreamCallbackTimeInfo* timeInfo,
	PaStreamCallbackFlags statusFlags,
	void *userData )
{
	audio_context_t *audio_ctx = (audio_context_t *) userData;

	/*asserts*/
	assert(audio_ctx != NULL);

	int i = 0;

	sample_t *rptr = (sample_t*) inputBuffer;
	sample_t *capture_buff = (sample_t *) audio_ctx->capture_buff;

	unsigned long numSamples = framesPerBuffer * audio_ctx->channels;
	uint64_t frame_length = NSEC_PER_SEC / audio_ctx->samprate; /*in nanosec*/

	PaTime ts_sec = timeInfo->inputBufferAdcTime; /*in seconds (double)*/
	int64_t ts = ts_sec * NSEC_PER_SEC; /*in nanosec (monotonic time)*/
	int64_t buff_ts = 0;

	/*first callback: initialize the reference timestamp*/
	if(audio_ctx->last_ts <= 0)
	{
		audio_ctx->last_ts = ts;
	}

	if(statusFlags & paInputOverflow)
	{
		fprintf( stderr, "AUDIO: portaudio buffer overflow\n" );

		/*determine the number of samples dropped since the last callback*/
		int64_t d_ts = ts - audio_ctx->last_ts;
		int n_samples = (d_ts / frame_length) * audio_ctx->channels;
		for( i = 0; i < n_samples; ++i )
		{
			capture_buff[sample_index] = 0;
			sample_index++;

			if(sample_index >= audio_ctx->capture_buff_size)
			{
				audio_fill_buffer(audio_ctx, audio_ctx->last_ts);
				sample_index = 0;
			}
		}

		if(verbosity > 1)
			printf("AUDIO: compensate overflow with %i silence samples\n", n_samples);
	}
	if(statusFlags & paInputUnderflow)
		fprintf( stderr, "AUDIO: portaudio buffer underflow\n" );

	if(sample_index == 0)
	{
		audio_ctx->capture_buff_level[0] = 0;
		audio_ctx->capture_buff_level[1] = 0;
	}

	int chan = 0;
	/*store capture samples*/
	for( i = 0; i < numSamples; ++i )
	{
		capture_buff[sample_index] = inputBuffer ? *rptr++ : 0;

		/*store peak value of the sample just written*/
		if(audio_ctx->capture_buff_level[chan] < capture_buff[sample_index])
			audio_ctx->capture_buff_level[chan] = capture_buff[sample_index];

		sample_index++;
		chan++;
		if(chan >= audio_ctx->channels)
			chan = 0;

		if(sample_index >= audio_ctx->capture_buff_size)
		{
			buff_ts = ts + ( i / audio_ctx->channels ) * frame_length;

			audio_fill_buffer(audio_ctx, buff_ts);

			/*reset*/
			audio_ctx->capture_buff_level[0] = 0;
			audio_ctx->capture_buff_level[1] = 0;
			sample_index = 0;
		}
	}

	audio_ctx->last_ts = ts + (framesPerBuffer * frame_length);

	if(audio_ctx->stream_flag == AUDIO_STRM_OFF )
		return (paComplete); /*capture stopped*/
	else
		return (paContinue); /*still capturing*/
}
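For context, a callback with this signature is handed to Pa_OpenStream() together with the audio context as user data. The sketch below shows a typical registration; the function name start_capture_sketch, the device selection, the paFloat32 format and the latency choice are illustrative assumptions, not code from the original project.

/* Sketch: registering recordCallback with PortAudio (illustrative values) */
#include <string.h>
#include <portaudio.h>

static PaStream *stream = NULL;

static int start_capture_sketch(audio_context_t *audio_ctx)
{
	PaStreamParameters inputParameters;
	memset(&inputParameters, 0, sizeof(inputParameters));

	inputParameters.device = Pa_GetDefaultInputDevice();
	inputParameters.channelCount = audio_ctx->channels;
	inputParameters.sampleFormat = paFloat32; /* must match sample_t */
	inputParameters.suggestedLatency =
		Pa_GetDeviceInfo(inputParameters.device)->defaultLowInputLatency;
	inputParameters.hostApiSpecificStreamInfo = NULL;

	PaError err = Pa_OpenStream(&stream,
		&inputParameters,
		NULL,                          /* no output stream */
		audio_ctx->samprate,
		paFramesPerBufferUnspecified,
		paNoFlag,
		recordCallback,                /* the callback above */
		audio_ctx);                    /* delivered as userData */

	if (err != paNoError)
		return -1;

	return (Pa_StartStream(stream) == paNoError) ? 0 : -1;
}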
Example 4
JNIEXPORT void JNICALL Java_com_fiftyply_mosaic_AudioThread_nativeFillAudioBuffer(JNIEnv* env, jobject obj, jobject buffer) {
  /* 'buffer' must be a direct ByteBuffer allocated on the Java side */
  void* bytes = env->GetDirectBufferAddress(buffer);
  jlong nBytes = env->GetDirectBufferCapacity(buffer);
  if (bytes == NULL || nBytes < 0)
    return; /* not a direct buffer */

  /* hand the raw 16-bit sample buffer to the native generator */
  jlong nSamples = nBytes / sizeof(int16_t);
  audio_fill_buffer((int16_t*)bytes, nSamples);
}
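On the Java side, com.fiftyply.mosaic.AudioThread presumably declares a matching native void nativeFillAudioBuffer(ByteBuffer buffer) method and passes a direct ByteBuffer (for example one obtained from ByteBuffer.allocateDirect()), since GetDirectBufferAddress() only returns a usable address for direct buffers.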