Example #1
vod_status_t
audio_filter_alloc_state(
	request_context_t* request_context,
	media_sequence_t* sequence,
	media_clip_t* clip,
	media_track_t* output_track,
	size_t* cache_buffer_count,
	void** result)
{
	audio_filter_init_context_t init_context;
	vod_status_t rc;

	// get the source count and graph desc size
	init_context.request_context = request_context;
	init_context.graph_desc_size = 0;
	init_context.source_count = 0;
	init_context.output_frame_count = 0;

	rc = audio_filter_walk_filters_prepare_init(&init_context, &clip, 100, 100);
	if (rc != VOD_OK)
	{
		return rc;
	}

	if (clip == NULL || init_context.source_count <= 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_alloc_state: unexpected - no sources found");
		return VOD_UNEXPECTED;
	}

	if (clip->type == MEDIA_CLIP_SOURCE)
	{
		// the clip collapsed to a plain source (e.g. a mix of a single source) - nothing to do
		return VOD_OK;
	}

	vod_log_error(VOD_LOG_ERR, request_context->log, 0,
		"audio_filter_alloc_state: audio filtering not supported, recompile with avcodec/avfilter to enable it");
	return VOD_UNEXPECTED;
}
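Note that audio_filter_alloc_state leaves *result untouched (NULL) when the clip reduces to a single source, so a caller has to treat a NULL state as "no filtering needed". The following minimal caller is a hypothetical sketch - the wrapper name init_audio_filtering and the include path are assumptions, not part of the module:

// hypothetical caller sketch - the include path and wrapper name are assumptions
#include "vod/filters/audio_filter.h"

static vod_status_t
init_audio_filtering(
	request_context_t* request_context,
	media_sequence_t* sequence,
	media_clip_t* clip,
	media_track_t* output_track)
{
	size_t cache_buffer_count = 0;
	void* filter_state = NULL;
	vod_status_t rc;

	rc = audio_filter_alloc_state(
		request_context,
		sequence,
		clip,
		output_track,
		&cache_buffer_count,
		&filter_state);
	if (rc != VOD_OK)
	{
		return rc;
	}

	if (filter_state == NULL)
	{
		// the clip reduced to a single source - no filtering required
		return VOD_OK;
	}

	// ... drive the filtering loop with filter_state here ...
	return VOD_OK;
}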
Example #2
vod_status_t
audio_filter_alloc_state(
	request_context_t* request_context,
	media_sequence_t* sequence,
	media_clip_t* clip,
	media_track_t* output_track,
	size_t* cache_buffer_count,
	void** result)
{
	audio_filter_init_context_t init_context;
	u_char filter_name[VOD_INT32_LEN + 1];
	audio_filter_state_t* state;
	vod_pool_cleanup_t *cln;
	AVFilterInOut *outputs = NULL;
	AVFilterInOut *inputs = NULL;
	uint32_t initial_alloc_size;
	vod_status_t rc;
	int avrc;

	if (!initialized)
	{
		vod_log_debug0(VOD_LOG_DEBUG_LEVEL, request_context->log, 0,
			"audio_filter_alloc_state: module failed to initialize successfully");
		return VOD_UNEXPECTED;
	}

	// get the source count and graph desc size
	init_context.request_context = request_context;
	init_context.graph_desc_size = 0;
	init_context.source_count = 0;
	init_context.output_frame_count = 0;

	rc = audio_filter_walk_filters_prepare_init(&init_context, &clip, 100, 100);
	if (rc != VOD_OK)
	{
		return rc;
	}

	if (clip == NULL || init_context.source_count <= 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_alloc_state: unexpected - no sources found");
		return VOD_UNEXPECTED;
	}

	if (clip->type == MEDIA_CLIP_SOURCE)
	{
		// the clip collapsed to a plain source (e.g. a mix of a single source) - nothing to do
		return VOD_OK;
	}

	if (init_context.output_frame_count > MAX_FRAME_COUNT)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_alloc_state: expected output frame count %uD too big", init_context.output_frame_count);
		return VOD_BAD_REQUEST;
	}

	// allocate the state
	state = vod_alloc(request_context->pool, sizeof(*state));
	if (state == NULL)
	{
		vod_log_debug0(VOD_LOG_DEBUG_LEVEL, request_context->log, 0,
			"audio_filter_alloc_state: vod_alloc failed");
		return VOD_ALLOC_FAILED;
	}
	vod_memzero(state, sizeof(*state));
	
	// add to the cleanup pool
	cln = vod_pool_cleanup_add(request_context->pool, 0);
	if (cln == NULL)
	{
		vod_log_debug0(VOD_LOG_DEBUG_LEVEL, request_context->log, 0,
			"audio_filter_alloc_state: vod_pool_cleanup_add failed");
		return VOD_ALLOC_FAILED;
	}

	cln->handler = audio_filter_free_state;
	cln->data = state;

	// allocate the filter graph
	state->filter_graph = avfilter_graph_alloc();
	if (state->filter_graph == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_alloc_state: avfilter_graph_alloc failed");
		return VOD_ALLOC_FAILED;
	}

	// allocate the graph desc and sources
	init_context.graph_desc = vod_alloc(request_context->pool, init_context.graph_desc_size + 
		sizeof(state->sources[0]) * init_context.source_count);
	if (init_context.graph_desc == NULL)
	{
		vod_log_debug0(VOD_LOG_DEBUG_LEVEL, request_context->log, 0,
			"audio_filter_alloc_state: vod_alloc failed (1)");
		return VOD_ALLOC_FAILED;
	}

	state->sources = (void*)(init_context.graph_desc + init_context.graph_desc_size);
	state->sources_end = state->sources + init_context.source_count;
	vod_memzero(state->sources, (u_char*)state->sources_end - (u_char*)state->sources);

	// initialize the sources and the graph description
	init_context.filter_graph = state->filter_graph;
	init_context.outputs = &outputs;
	init_context.cur_source = state->sources;
	init_context.graph_desc_pos = init_context.graph_desc;
	init_context.max_frame_size = 0;
	init_context.cache_slot_id = 0;

	rc = audio_filter_init_sources_and_graph_desc(&init_context, clip);
	if (rc != VOD_OK)
	{
		goto end;
	}

	*init_context.graph_desc_pos = '\0';

	// initialize the sink
	vod_sprintf(filter_name, "%uD%Z", clip->id);

	rc = audio_filter_init_sink(
		request_context,
		state->filter_graph,
		output_track,
		filter_name,
		&state->sink,
		&inputs);
	if (rc != VOD_OK)
	{
		goto end;
	}

	// parse the graph description
	avrc = avfilter_graph_parse_ptr(state->filter_graph, (char*)init_context.graph_desc, &inputs, &outputs, NULL);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_alloc_state: avfilter_graph_parse_ptr failed %d", avrc);
		rc = VOD_UNEXPECTED;
		goto end;
	}

	// validate and configure the graph
	avrc = avfilter_graph_config(state->filter_graph, NULL);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_alloc_state: avfilter_graph_config failed %d", avrc);
		rc = VOD_UNEXPECTED;
		goto end;
	}

	// set the buffer sink frame size
	if ((state->sink.encoder->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) == 0)
	{
		av_buffersink_set_frame_size(state->sink.buffer_sink, state->sink.encoder->frame_size);
	}
	
	// allocate frames
	state->decoded_frame = av_frame_alloc();
	if (state->decoded_frame == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_alloc_state: av_frame_alloc failed (1)");
		rc = VOD_ALLOC_FAILED;
		goto end;
	}
	state->filtered_frame = av_frame_alloc();
	if (state->filtered_frame == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_alloc_state: av_frame_alloc failed (2)");
		rc = VOD_ALLOC_FAILED;
		goto end;
	}

	// allocate the frame buffer
	state->frame_buffer = vod_alloc(request_context->pool, init_context.max_frame_size);
	if (state->frame_buffer == NULL)
	{
		vod_log_debug0(VOD_LOG_DEBUG_LEVEL, request_context->log, 0,
			"audio_filter_alloc_state: vod_alloc failed (2)");
		rc = VOD_ALLOC_FAILED;
		goto end;
	}

	// initialize the output arrays
	initial_alloc_size = init_context.output_frame_count + 10;

	if (vod_array_init(&state->frames_array, request_context->pool, initial_alloc_size, sizeof(input_frame_t)) != VOD_OK)
	{
		vod_log_debug0(VOD_LOG_DEBUG_LEVEL, request_context->log, 0,
			"audio_filter_alloc_state: vod_array_init failed (1)");
		rc = VOD_ALLOC_FAILED;
		goto end;
	}

	state->request_context = request_context;
	state->sequence = sequence;
	state->output = output_track;
	state->cur_frame_pos = 0;
	state->first_time = TRUE;
	state->cur_source = NULL;

	*cache_buffer_count = init_context.cache_slot_id;
	*result = state;

end:

	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);

	return rc;
}
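Example #2 follows the standard libavfilter setup sequence: avfilter_graph_alloc, an abuffer filter per source, an abuffersink, avfilter_graph_parse_ptr on a textual graph description, avfilter_graph_config, and a single cleanup label that frees the AVFilterInOut lists. Below is a minimal self-contained sketch of that same sequence, with the filter description and abuffer arguments hard-coded as illustrative assumptions:

// standalone sketch - the graph description and abuffer arguments are illustrative assumptions
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

static int
build_audio_graph(AVFilterGraph** graph_out, AVFilterContext** src_out, AVFilterContext** sink_out)
{
	const char* graph_desc = "atempo=1.5";		// assumed filter chain
	const AVFilter* abuffer = avfilter_get_by_name("abuffer");
	const AVFilter* abuffersink = avfilter_get_by_name("abuffersink");
	AVFilterGraph* graph = avfilter_graph_alloc();
	AVFilterInOut* outputs = avfilter_inout_alloc();
	AVFilterInOut* inputs = avfilter_inout_alloc();
	AVFilterContext* src = NULL;
	AVFilterContext* sink = NULL;
	int rc = AVERROR(ENOMEM);

	if (graph == NULL || outputs == NULL || inputs == NULL ||
		abuffer == NULL || abuffersink == NULL)
	{
		goto end;
	}

	// source that decoded frames are pushed into (format arguments are assumptions)
	rc = avfilter_graph_create_filter(&src, abuffer, "in",
		"time_base=1/44100:sample_rate=44100:sample_fmt=fltp:channel_layout=stereo",
		NULL, graph);
	if (rc < 0)
	{
		goto end;
	}

	// sink that filtered frames are pulled from
	rc = avfilter_graph_create_filter(&sink, abuffersink, "out", NULL, NULL, graph);
	if (rc < 0)
	{
		goto end;
	}

	// connect the description's default "in"/"out" labels to the source/sink
	outputs->name = av_strdup("in");
	outputs->filter_ctx = src;
	outputs->pad_idx = 0;
	outputs->next = NULL;

	inputs->name = av_strdup("out");
	inputs->filter_ctx = sink;
	inputs->pad_idx = 0;
	inputs->next = NULL;

	// parse the textual description, then validate and configure the graph
	rc = avfilter_graph_parse_ptr(graph, graph_desc, &inputs, &outputs, NULL);
	if (rc < 0)
	{
		goto end;
	}

	rc = avfilter_graph_config(graph, NULL);
	if (rc < 0)
	{
		goto end;
	}

	*graph_out = graph;
	*src_out = src;
	*sink_out = sink;
	graph = NULL;	// ownership passes to the caller

end:

	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);
	avfilter_graph_free(&graph);

	return rc;
}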
Example #3
static vod_status_t 
audio_filter_walk_filters_prepare_init(
	audio_filter_init_context_t* state, 
	media_clip_t** clip_ptr, 
	uint32_t speed_nom, 
	uint32_t speed_denom)
{
	media_clip_rate_filter_t* rate_filter;
	media_clip_source_t* source;
	media_track_t* audio_track;
	media_track_t* cur_track;
	media_clip_t** sources_end;
	media_clip_t** sources_cur;
	media_clip_t* clip = *clip_ptr;
	media_clip_t* last_source = NULL;
	vod_status_t rc;
	uint32_t cur_frame_count;
	uint32_t source_count;

	switch (clip->type)
	{
	case MEDIA_CLIP_SOURCE:
		source = vod_container_of(clip, media_clip_source_t, base);

		audio_track = NULL;
		for (cur_track = source->track_array.first_track; cur_track < source->track_array.last_track; cur_track++)
		{
			if (cur_track->media_info.media_type != MEDIA_TYPE_AUDIO)
			{
				continue;
			}

			if (audio_track != NULL)
			{
				vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
					"audio_filter_walk_filters_prepare_init: more than one audio track per source - unsupported");
				return VOD_BAD_REQUEST;
			}

			audio_track = cur_track;
		}

		if (audio_track == NULL || audio_track->frame_count == 0)
		{
			*clip_ptr = NULL;
			return VOD_OK;
		}

		state->source_count++;

		cur_frame_count = ((uint64_t)audio_track->frame_count * speed_denom) / speed_nom;
		if (state->output_frame_count < cur_frame_count)
		{
			state->output_frame_count = cur_frame_count;
		}
		return VOD_OK;

	case MEDIA_CLIP_RATE_FILTER:
		rate_filter = vod_container_of(clip, media_clip_rate_filter_t, base);
		speed_nom = ((uint64_t)speed_nom * rate_filter->rate.nom) / rate_filter->rate.denom;
		break;

	default:;
	}

	// recursively prepare the child sources
	source_count = 0;

	sources_end = clip->sources + clip->source_count;
	for (sources_cur = clip->sources; sources_cur < sources_end; sources_cur++)
	{
		rc = audio_filter_walk_filters_prepare_init(state, sources_cur, speed_nom, speed_denom);
		if (rc != VOD_OK)
		{
			return rc;
		}

		if (*sources_cur != NULL)
		{
			source_count++;
			last_source = *sources_cur;
		}
	}

	// skip the current filter when it's not needed
	switch (source_count)
	{
	case 0:
		*clip_ptr = NULL;
		return VOD_OK;

	case 1:
		switch (clip->type)
		{
		case MEDIA_CLIP_MIX_FILTER:
		case MEDIA_CLIP_CONCAT_FILTER:
			// mixing or concatenating a single clip is a no-op - skip the filter
			*clip_ptr = last_source;
			return VOD_OK;

		default:;
		}
		break;
	}

	// update the graph description size
	state->graph_desc_size += clip->audio_filter->get_filter_desc_size(clip) + 1;	// 1 = ';'

	return VOD_OK;
}
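The walk scales the expected output frame count by the accumulated speed factor (frame_count * speed_denom / speed_nom), where rate filters multiply speed_nom on the way down. A tiny standalone sketch of that arithmetic, using a hypothetical helper that is not part of the module:

// hypothetical standalone helper - not part of the module
#include <stdint.h>
#include <stdio.h>

static uint32_t
scaled_frame_count(uint32_t frame_count, uint32_t speed_nom, uint32_t speed_denom)
{
	// same arithmetic as the MEDIA_CLIP_SOURCE case above
	return (uint32_t)(((uint64_t)frame_count * speed_denom) / speed_nom);
}

int main(void)
{
	// the walk starts at speed 100/100; a 2x rate filter scales speed_nom to 200
	printf("%u\n", scaled_frame_count(1000, 100, 100));	/* prints 1000 */
	printf("%u\n", scaled_frame_count(1000, 200, 100));	/* prints 500 */
	return 0;
}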