Example #1
static uint64_t mix_and_output(struct audio_output *audio, uint64_t audio_time,
		uint64_t prev_time)
{
	struct audio_line *line = audio->first_line;
	uint32_t frames = (uint32_t)ts_diff_frames(audio, audio_time,
	                                           prev_time);
	size_t bytes = frames * audio->block_size;

#ifdef DEBUG_AUDIO
	blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %zu",
			audio_time, prev_time, bytes);
#endif

	/* return an adjusted audio_time according to the amount
	 * of data that was sampled to ensure seamless transmission */
	audio_time = prev_time + conv_frames_to_time(audio, frames);

	/* resize and clear mix buffers */
	for (size_t i = 0; i < audio->planes; i++) {
		da_resize(audio->mix_buffers[i], bytes);
		memset(audio->mix_buffers[i].array, 0, bytes);
	}

	/* mix audio lines */
	while (line) {
		struct audio_line *next = line->next;

		/* if the line is drained and marked for removal, destroy it
		 * and move to the next */
		if (!line->buffers[0].size) {
			if (!line->alive) {
				audio_output_removeline(audio, line);
				line = next;
				continue;
			}
		}

		pthread_mutex_lock(&line->mutex);

		if (line->buffers[0].size && line->base_timestamp < prev_time) {
			clear_excess_audio_data(line, prev_time);
			line->base_timestamp = prev_time;
		}

		if (mix_audio_line(audio, line, bytes, prev_time))
			line->base_timestamp = audio_time;

		pthread_mutex_unlock(&line->mutex);

		line = next;
	}

	/* output */
	do_audio_output(audio, prev_time, frames);

	return audio_time;
}
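
The adjusted audio_time returned here is meant to be fed back in as prev_time on the next tick, which is what keeps consecutive mixes contiguous. The loop below is a minimal sketch of such a driver and is not taken from this example: the helpers (os_gettime_ns, os_sleep_ms, os_event_try), the AUDIO_WAIT_TIME constant and the stop_event/line_mutex/buffer_ms fields are assumptions for illustration.

/* Hypothetical driver loop: feeds the adjusted audio_time back in as
 * prev_time so that successive calls to mix_and_output() are contiguous.
 * Helper and field names here are assumptions, not part of the example. */
static void *audio_thread_sketch(void *param)
{
	struct audio_output *audio = param;
	uint64_t buffer_time = audio->info.buffer_ms * 1000000ULL;
	uint64_t prev_time = os_gettime_ns() - buffer_time;
	uint64_t audio_time;

	/* run until the owner signals the (assumed) stop_event */
	while (os_event_try(audio->stop_event) == EAGAIN) {
		os_sleep_ms(AUDIO_WAIT_TIME);

		pthread_mutex_lock(&audio->line_mutex);

		/* mix everything up to "now", then carry the adjusted
		 * timestamp over as the next prev_time */
		audio_time = os_gettime_ns() - buffer_time;
		audio_time = mix_and_output(audio, audio_time, prev_time);
		prev_time = audio_time;

		pthread_mutex_unlock(&audio->line_mutex);
	}

	return NULL;
}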
Example #2
static void input_and_output(struct audio_output *audio,
		uint64_t audio_time, uint64_t prev_time)
{
	size_t bytes = AUDIO_OUTPUT_FRAMES * audio->block_size;
	struct audio_output_data data[MAX_AUDIO_MIXES];
	uint32_t active_mixes = 0;
	uint64_t new_ts = 0;
	bool success;

	memset(data, 0, sizeof(data));

#ifdef DEBUG_AUDIO
	blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %zu",
			audio_time, prev_time, bytes);
#endif

	/* build a bitmask of the mixes that currently have inputs */
	pthread_mutex_lock(&audio->input_mutex);
	for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
		if (audio->mixes[i].inputs.num)
			active_mixes |= (1 << i);
	}
	pthread_mutex_unlock(&audio->input_mutex);

	/* clear mix buffers */
	for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) {
		struct audio_mix *mix = &audio->mixes[mix_idx];

		memset(mix->buffer[0], 0, AUDIO_OUTPUT_FRAMES *
				MAX_AUDIO_CHANNELS * sizeof(float));

		for (size_t i = 0; i < audio->planes; i++)
			data[mix_idx].data[i] = mix->buffer[i];
	}

	/* get new audio data */
	success = audio->input_cb(audio->input_param, prev_time, audio_time,
			&new_ts, active_mixes, data);
	if (!success)
		return;

	/* clamps audio data to -1.0..1.0 */
	clamp_audio_output(audio, bytes);

	/* output */
	for (size_t i = 0; i < MAX_AUDIO_MIXES; i++)
		do_audio_output(audio, i, new_ts, AUDIO_OUTPUT_FRAMES);
}
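
The shape of input_cb can be read off the call site above: it receives the requested time range, a pointer through which it reports the timestamp of the data it actually produced, the bitmask of active mixes, and one audio_output_data per mix. The callback below is a minimal sketch of that contract, assuming audio_output_data holds up to MAX_AUDIO_CHANNELS plane pointers; my_input_cb and fill_samples are hypothetical names, and writing silence stands in for a real sample source.

/* Placeholder sample source for the sketch: writes silence. */
static void fill_samples(float *dst, size_t frames)
{
	memset(dst, 0, frames * sizeof(float));
}

/* Hypothetical input callback matching the call in input_and_output().
 * It fills the planes of every mix flagged in the "mixers" bitmask and
 * reports the timestamp of the produced data through out_ts. */
static bool my_input_cb(void *param, uint64_t start_ts, uint64_t end_ts,
		uint64_t *out_ts, uint32_t mixers,
		struct audio_output_data *mixes)
{
	for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) {
		if ((mixers & (1 << mix_idx)) == 0)
			continue;

		/* planes that were not set up by the caller stay NULL */
		for (size_t plane = 0; plane < MAX_AUDIO_CHANNELS; plane++) {
			float *samples = mixes[mix_idx].data[plane];
			if (!samples)
				break;
			fill_samples(samples, AUDIO_OUTPUT_FRAMES);
		}
	}

	*out_ts = start_ts;

	(void)param;
	(void)end_ts;
	return true;
}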
Example #3
static uint64_t mix_and_output(struct audio_output *audio, uint64_t audio_time,
		uint64_t prev_time)
{
	struct audio_line *line = audio->first_line;
	uint32_t frames = (uint32_t)ts_diff_frames(audio, audio_time,
	                                           prev_time);
	size_t bytes = frames * audio->block_size;

#ifdef DEBUG_AUDIO
	blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %zu",
			audio_time, prev_time, bytes);
#endif

	/* return an adjusted audio_time according to the amount
	 * of data that was sampled to ensure seamless transmission */
	audio_time = prev_time + conv_frames_to_time(audio, frames);

	/* resize and clear mix buffers */
	for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) {
		struct audio_mix *mix = &audio->mixes[mix_idx];

		for (size_t i = 0; i < audio->planes; i++) {
			da_resize(mix->mix_buffers[i], bytes);
			memset(mix->mix_buffers[i].array, 0, bytes);
		}
	}

	/* mix audio lines */
	while (line) {
		struct audio_line *next = line->next;

		/* if the line is drained and marked for removal, destroy it
		 * and move to the next */
		if (!line->buffers[0].size) {
			if (!line->alive) {
				audio_output_removeline(audio, line);
				line = next;
				continue;
			}
		}

		pthread_mutex_lock(&line->mutex);

		if (line->buffers[0].size && line->base_timestamp < prev_time) {
			clear_excess_audio_data(line, prev_time);
			line->base_timestamp = prev_time;

		} else if (line->audio_getting_cut_off) {
			line->audio_getting_cut_off = false;
			blog(LOG_WARNING, "Audio line '%s' audio data no "
			                  "longer getting cut off.",
			                  line->name);
		}

		if (mix_audio_line(audio, line, bytes, prev_time))
			line->base_timestamp = audio_time;

		pthread_mutex_unlock(&line->mutex);

		line = next;
	}

	/* clamps audio data to -1.0..1.0 */
	clamp_audio_output(audio, bytes);

	/* output */
	for (size_t i = 0; i < MAX_AUDIO_MIXES; i++)
		do_audio_output(audio, i, prev_time, frames);

	return audio_time;
}