static adaptation_set_t*
track_groups_to_adaptation_sets(
	track_groups_t* groups,
	media_track_t*** cur_track_ptr_arg,
	adaptation_set_t* cur_adaptation_set)
{
	media_track_t** track_write_pos = *cur_track_ptr_arg;
	track_group_t* cur_group;
	vod_queue_t* head = &groups->list;
	vod_queue_t* link;

	// emit one adaptation set per track group, in list order
	for (link = vod_queue_head(head); link != head; link = link->next)
	{
		cur_group = vod_container_of(link, track_group_t, list_node);

		// the helper fills the adaptation set and returns the next
		// free slot in the shared track pointer array
		track_write_pos = track_group_to_adaptation_set(
			cur_group,
			track_write_pos,
			cur_adaptation_set);
		cur_adaptation_set++;
	}

	// hand the advanced track pointer position back to the caller
	*cur_track_ptr_arg = track_write_pos;

	return cur_adaptation_set;
}
/* --- Example #2 --- */
static void
filter_get_clip_track_count(media_clip_t* clip, uint32_t* track_count)
{
	media_clip_source_t* source;
	media_track_t* track;
	media_track_t* tracks_end;
	media_clip_t** child;
	media_clip_t** children_end;

	if (clip->type != MEDIA_CLIP_SOURCE)
	{
		// inner clip - accumulate the counts of all child clips
		children_end = clip->sources + clip->source_count;
		for (child = clip->sources; child < children_end; child++)
		{
			filter_get_clip_track_count(*child, track_count);
		}
		return;
	}

	// leaf source - bump the per-media-type counter for each track
	source = vod_container_of(clip, media_clip_source_t, base);
	tracks_end = source->track_array.last_track;
	for (track = source->track_array.first_track; track < tracks_end; track++)
	{
		track_count[track->media_info.media_type]++;
	}
}
// rbtree insert callback: descends from temp to find the insertion point for
// node, ordering first by the numeric hash key on the rbtree node and, on
// hash collision, by a full track_group_key_t comparison.
static void
track_group_rbtree_insert_value(
	vod_rbtree_node_t *temp,
	vod_rbtree_node_t *node,
	vod_rbtree_node_t *sentinel)
{
	vod_rbtree_node_t **p;
	track_group_t *n, *t;

	// the inserted node does not change during the descent - resolve its
	// enclosing group once instead of on every loop iteration
	n = vod_container_of(node, track_group_t, rbtree_node);

	for (;;)
	{
		t = vod_container_of(temp, track_group_t, rbtree_node);

		if (node->key != temp->key)
		{
			// primary order: the hash stored in the rbtree node key
			p = (node->key < temp->key) ? &temp->left : &temp->right;
		}
		else
		{
			// hash collision - fall back to comparing the full keys
			p = (track_group_key_compare(&n->key, &t->key) < 0)
				? &temp->left : &temp->right;
		}

		if (*p == sentinel)
		{
			break;
		}

		temp = *p;
	}

	// link the node in place; new nodes are colored red, as required by
	// the red-black rebalancing that runs after this callback
	*p = node;
	node->parent = temp;
	node->left = sentinel;
	node->right = sentinel;
	vod_rbt_red(node);
}
// finds the track group matching (hash, key) in the rbtree, NULL if absent
static track_group_t *
track_group_rbtree_lookup(vod_rbtree_t *rbtree, track_group_key_t* key, uint32_t hash)
{
	vod_rbtree_node_t *node;
	vod_rbtree_node_t *sentinel;
	track_group_t *group;
	vod_int_t cmp;

	node = rbtree->root;
	sentinel = rbtree->sentinel;

	// binary search - first by hash, then by full key on hash collision
	while (node != sentinel)
	{
		if (hash != node->key)
		{
			node = (hash < node->key) ? node->left : node->right;
			continue;
		}

		group = vod_container_of(node, track_group_t, rbtree_node);

		cmp = track_group_key_compare(key, &group->key);
		if (cmp == 0)
		{
			return group;
		}

		node = (cmp < 0) ? node->left : node->right;
	}

	// not found
	return NULL;
}
// appends the rate filter ffmpeg description ("X.YY" tempo) for clip to p,
// returns the advanced write position.
static u_char*
rate_filter_append_desc(u_char* p, media_clip_t* clip)
{
	media_clip_rate_filter_t* filter = vod_container_of(clip, media_clip_rate_filter_t, base);
	uint32_t denom;
	uint32_t nom;

	// normalize the fraction to a denominator of 100 so it can be printed
	// as "nom/100.nom%100". rescaling directly (instead of the previous
	// multiply-by-10 loop) also handles denominators that are not powers
	// of 10 below 100, e.g. 30 or 1000.
	nom = filter->rate.nom;
	denom = filter->rate.denom;
	if (denom != 100 && denom > 0)		// denom is expected to be positive
	{
		nom = (uint32_t)(((uint64_t)nom * 100) / denom);
	}

	return vod_sprintf(
		p,
		RATE_FILTER_DESC_PATTERN,
		clip->sources[0]->id,
		nom / 100,
		nom % 100,
		clip->id);
}
// builds the full list of adaptation sets (muxed / video / audio / subtitle)
// for the media set, according to the requested flags.
// on success, fills output (per-type counts, first/last pointers) and
// returns VOD_OK; returns VOD_BAD_REQUEST when no usable tracks exist, or
// VOD_ALLOC_FAILED on allocation failure.
vod_status_t
manifest_utils_get_adaptation_sets(
	request_context_t* request_context,
	media_set_t* media_set,
	uint32_t flags,
	adaptation_sets_t* output)
{
	track_group_key_t* muxed_audio_key;
	adaptation_set_t* cur_adaptation_set;
	adaptation_set_t* adaptation_sets;
	track_groups_t groups[MEDIA_TYPE_COUNT];
	track_group_t* first_audio_group;
	media_track_t** cur_track_ptr;
	vod_status_t rc;
	uint32_t media_type;
	size_t adaptation_sets_count;

	// update flags
	if (media_set->track_count[MEDIA_TYPE_VIDEO] <= 0)
	{
		// cannot generate muxed media set if there are only subtitles
		if (media_set->track_count[MEDIA_TYPE_AUDIO] <= 0 &&
			(flags & ADAPTATION_SETS_FLAG_MUXED) != 0)
		{
			vod_log_error(VOD_LOG_ERR, request_context->log, 0,
				"manifest_utils_get_adaptation_sets: no audio/video tracks");
			return VOD_BAD_REQUEST;
		}

		// without video, subtitle adaptation sets are not emitted
		flags |= ADAPTATION_SETS_FLAG_IGNORE_SUBTITLES;
	}

	// multiple distinct audio labels -> expose a language-selection manifest
	if (manifest_utils_is_multi_audio(media_set))
	{
		flags |= ADAPTATION_SETS_FLAG_MULTI_AUDIO;
		output->multi_audio = TRUE;
	}
	else
	{
		output->multi_audio = FALSE;
	}

	// initialize the track groups
	rc = track_groups_from_media_set(
		request_context,
		media_set,
		flags,
		MEDIA_TYPE_NONE,
		groups);
	if (rc != VOD_OK)
	{
		return rc;
	}

	// DEFAULT_LANG_LAST: rotate the default (first) audio group to the tail
	// so it is output last
	if (vod_all_flags_set(flags, ADAPTATION_SETS_FLAG_MULTI_AUDIO | ADAPTATION_SETS_FLAG_DEFAULT_LANG_LAST))
	{
		vod_queue_t* first = vod_queue_head(&groups[MEDIA_TYPE_AUDIO].list);
		vod_queue_remove(first);
		vod_queue_insert_tail(&groups[MEDIA_TYPE_AUDIO].list, first);
	}

	// get the number of adaptation sets
	if (groups[MEDIA_TYPE_VIDEO].count > 0 && (flags & ADAPTATION_SETS_FLAG_MUXED) != 0)
	{
		// muxed mode: a single muxed set replaces the video sets; separate
		// audio sets are only needed when there is more than one audio group
		output->count[ADAPTATION_TYPE_MUXED] = 1;
		output->count[ADAPTATION_TYPE_VIDEO] = 0;
		if (groups[MEDIA_TYPE_AUDIO].count > 1)
		{
			output->count[ADAPTATION_TYPE_AUDIO] = groups[MEDIA_TYPE_AUDIO].count;
			if ((flags & ADAPTATION_SETS_FLAG_EXCLUDE_MUXED_AUDIO) != 0)
			{
				// the audio group that is muxed in is not output separately
				output->count[ADAPTATION_TYPE_AUDIO]--;
			}
		}
		else
		{
			output->count[ADAPTATION_TYPE_AUDIO] = 0;
		}
	}
	else
	{
		output->count[ADAPTATION_TYPE_MUXED] = 0;
		output->count[ADAPTATION_TYPE_VIDEO] = groups[MEDIA_TYPE_VIDEO].count;
		output->count[ADAPTATION_TYPE_AUDIO] = groups[MEDIA_TYPE_AUDIO].count;
	}

	output->count[ADAPTATION_TYPE_SUBTITLE] = groups[MEDIA_TYPE_SUBTITLE].count;

	adaptation_sets_count =
		output->count[ADAPTATION_TYPE_VIDEO] +
		output->count[ADAPTATION_TYPE_AUDIO] + 
		output->count[ADAPTATION_TYPE_SUBTITLE] + 
		output->count[ADAPTATION_TYPE_MUXED];

	// allocate the adaptation sets and tracks
	// (single allocation: the adaptation_set_t array is followed by the
	// track pointer array that the sets index into)
	adaptation_sets = vod_alloc(request_context->pool, 
		sizeof(adaptation_sets[0]) * adaptation_sets_count + 
		sizeof(adaptation_sets[0].first[0]) * media_set->total_track_count);
	if (adaptation_sets == NULL)
	{
		vod_log_debug0(VOD_LOG_DEBUG_LEVEL, request_context->log, 0,
			"manifest_utils_get_adaptation_sets: vod_alloc failed");
		return VOD_ALLOC_FAILED;
	}

	// track pointers live immediately after the adaptation set array
	cur_track_ptr = (void*)(adaptation_sets + adaptation_sets_count);
	cur_adaptation_set = adaptation_sets;

	if (output->count[ADAPTATION_TYPE_MUXED] > 0)
	{
		// initialize the muxed adaptation set
		first_audio_group = vod_container_of(
			vod_queue_head(&groups[MEDIA_TYPE_AUDIO].list), track_group_t, list_node);

		muxed_audio_key = NULL;
		if (output->count[ADAPTATION_TYPE_AUDIO] > 0)
		{
			flags |= ADAPTATION_SETS_FLAG_AVOID_AUDIO_ONLY;

			if ((flags & ADAPTATION_SETS_FLAG_EXCLUDE_MUXED_AUDIO) != 0)
			{
				muxed_audio_key = &first_audio_group->key;
				vod_queue_remove(&first_audio_group->list_node);	// do not output this label separately
			}
		}

		output->first_by_type[ADAPTATION_TYPE_MUXED] = cur_adaptation_set;
		rc = manifest_utils_get_muxed_adaptation_set(
			request_context,
			media_set,
			flags,
			muxed_audio_key,
			cur_adaptation_set);
		if (rc != VOD_OK)
		{
			return rc;
		}
		cur_adaptation_set++;
	}

	// initialize all other adaptation sets
	for (media_type = MEDIA_TYPE_VIDEO; media_type < MEDIA_TYPE_COUNT; media_type++)
	{
		if (output->count[media_type] <= 0)
		{
			continue;
		}

		output->first_by_type[media_type] = cur_adaptation_set;

		cur_adaptation_set = track_groups_to_adaptation_sets(
			&groups[media_type],
			&cur_track_ptr,
			cur_adaptation_set);
	}

	output->first = adaptation_sets;
	output->last = adaptation_sets + adaptation_sets_count;
	output->total_count = adaptation_sets_count;

	return VOD_OK;
}
/* --- Example #7 --- */
// walks the clip tree, copying video tracks into the output clip (scaling
// their timestamps by the accumulated speed) and picking the first audio
// track encountered as the audio reference.
static void
filter_scale_video_tracks(filters_init_state_t* state, media_clip_t* clip, uint32_t speed_nom, uint32_t speed_denom)
{
	media_clip_rate_filter_t* rate_filter;
	media_clip_source_t* source;
	media_track_t* copied_track;
	media_track_t* track;
	media_track_t* tracks_end;
	media_clip_t** child;
	media_clip_t** children_end;

	if (clip->type != MEDIA_CLIP_SOURCE)
	{
		// a rate filter compounds into the accumulated speed numerator
		if (clip->type == MEDIA_CLIP_RATE_FILTER)
		{
			rate_filter = vod_container_of(clip, media_clip_rate_filter_t, base);
			speed_nom = ((uint64_t)speed_nom * rate_filter->rate.nom) / rate_filter->rate.denom;
		}

		// recursively filter the child clips
		children_end = clip->sources + clip->source_count;
		for (child = clip->sources; child < children_end; child++)
		{
			filter_scale_video_tracks(state, *child, speed_nom, speed_denom);
		}
		return;
	}

	source = vod_container_of(clip, media_clip_source_t, base);

	// the mvhd atom of the first source is used for the output clip
	if (state->source_count == 0)
	{
		state->output_clip->mvhd_atom = source->track_array.mvhd_atom;
	}

	tracks_end = source->track_array.last_track;
	for (track = source->track_array.first_track; track < tracks_end; track++)
	{
		if (track->media_info.media_type == MEDIA_TYPE_VIDEO)
		{
			// copy the video track; scale timestamps when the speed is not 1
			copied_track = filter_copy_track_to_clip(state, track);
			if (speed_nom != speed_denom)
			{
				rate_filter_scale_track_timestamps(copied_track, speed_nom, speed_denom);
			}
		}
		else if (track->media_info.media_type == MEDIA_TYPE_AUDIO)
		{
			// the first audio track seen becomes the reference track,
			// remembered together with the speed in effect at its clip
			if (state->audio_reference_track == NULL)
			{
				state->audio_reference_track = track;
				state->audio_reference_track_speed_nom = speed_nom;
				state->audio_reference_track_speed_denom = speed_denom;
			}
			if (track->frame_count > 0)
			{
				state->has_audio_frames = TRUE;
			}
		}
	}

	state->source_count++;
}
// walks the clip tree; for each leaf source clip, initializes an audio filter
// source (frame state, cache slot, ffmpeg buffer source) and updates the max
// frame size; for inner clips, recurses into the children and then appends
// the clip's filter description to the graph description string.
// returns VOD_OK on success, or the error of a failing source init.
static vod_status_t 
audio_filter_init_sources_and_graph_desc(audio_filter_init_context_t* state, media_clip_t* clip)
{
	audio_filter_source_t* cur_source;
	frame_list_part_t* part;
	media_clip_t** sources_end;
	media_clip_t** sources_cur;
	media_clip_source_t* source;
	media_track_t* audio_track;
	media_track_t* cur_track;
	u_char filter_name[VOD_INT32_LEN + 1];
	vod_status_t rc;
	input_frame_t* cur_frame;
	input_frame_t* last_frame;

	if (clip->type == MEDIA_CLIP_SOURCE)
	{
		source = vod_container_of(clip, media_clip_source_t, base);

		// find the audio track
		audio_track = NULL;
		for (cur_track = source->track_array.first_track; cur_track < source->track_array.last_track; cur_track++)
		{
			if (cur_track->media_info.media_type == MEDIA_TYPE_AUDIO)
			{
				audio_track = cur_track;
				break;
			}
		}

		// NOTE(review): audio_track is dereferenced below without a NULL
		// check - this relies on the prepare pass having already nulled out
		// source clips with no audio frames (the recursion below skips NULL
		// children); confirm this invariant holds for all callers

		// update the max frame size
		// (iterate all frame parts of the track; when a part is exhausted,
		// advance to the next linked part)
		part = &audio_track->frames;
		last_frame = part->last_frame;
		for (cur_frame = part->first_frame;; cur_frame++)
		{
			if (cur_frame >= last_frame)
			{
				if (part->next == NULL)
				{
					break;
				}
				part = part->next;
				cur_frame = part->first_frame;
				last_frame = part->last_frame;
			}

			if (cur_frame->size > state->max_frame_size)
			{
				state->max_frame_size = cur_frame->size;
			}
		}

		// initialize the source
		cur_source = state->cur_source;
		state->cur_source++;

		cur_source->cur_frame_part = audio_track->frames;
		cur_source->cur_frame = audio_track->frames.first_frame;

		// assign a dedicated cache slot for this source's frame reads
		cur_source->cur_frame_part.frames_source->set_cache_slot_id(
			cur_source->cur_frame_part.frames_source_context,
			state->cache_slot_id++);

		// the filter is named after the clip id (decimal, null terminated)
		vod_sprintf(filter_name, "%uD%Z", clip->id);

		return audio_filter_init_source(
			state->request_context,
			state->filter_graph,
			filter_name,
			&audio_track->media_info,
			cur_source,
			state->outputs);
	}

	// recursively init the child sources
	sources_end = clip->sources + clip->source_count;
	for (sources_cur = clip->sources; sources_cur < sources_end; sources_cur++)
	{
		// NULL children were pruned by the prepare pass - skip them
		if (*sources_cur == NULL)
		{
			continue;
		}

		rc = audio_filter_init_sources_and_graph_desc(state, *sources_cur);
		if (rc != VOD_OK)
		{
			return rc;
		}
	}

	// add the filter description
	// (filters are separated by ';' in the ffmpeg graph description)
	if (state->graph_desc_pos > state->graph_desc)
	{
		*state->graph_desc_pos++ = ';';
	}

	state->graph_desc_pos = clip->audio_filter->append_filter_desc(state->graph_desc_pos, clip);

	return VOD_OK;
}
// prepare pass before building the audio filter graph: walks the clip tree,
// prunes clips that contribute no audio (sets *clip_ptr to NULL), collapses
// single-input mix/concat filters, counts the sources, estimates the output
// frame count, and accumulates the graph description size.
// returns VOD_OK on success, VOD_BAD_REQUEST on an unsupported layout.
static vod_status_t 
audio_filter_walk_filters_prepare_init(
	audio_filter_init_context_t* state, 
	media_clip_t** clip_ptr, 
	uint32_t speed_nom, 
	uint32_t speed_denom)
{
	media_clip_rate_filter_t* rate_filter;
	media_clip_source_t* source;
	media_track_t* audio_track;
	media_track_t* cur_track;
	media_clip_t** sources_end;
	media_clip_t** sources_cur;
	media_clip_t* clip = *clip_ptr;
	media_clip_t* last_source = NULL;
	vod_status_t rc;
	uint32_t cur_frame_count;
	uint32_t source_count;

	switch (clip->type)
	{
	case MEDIA_CLIP_SOURCE:
		source = vod_container_of(clip, media_clip_source_t, base);

		// find the single audio track of the source; more than one is
		// rejected, since each source maps to one filter graph input
		audio_track = NULL;
		for (cur_track = source->track_array.first_track; cur_track < source->track_array.last_track; cur_track++)
		{
			if (cur_track->media_info.media_type != MEDIA_TYPE_AUDIO)
			{
				continue;
			}

			if (audio_track != NULL)
			{
				vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
					"audio_filter_walk_filters_prepare_init: more than one audio track per source - unsupported");
				return VOD_BAD_REQUEST;
			}

			audio_track = cur_track;
		}

		// prune sources that contribute no audio frames
		if (audio_track == NULL || audio_track->frame_count == 0)
		{
			*clip_ptr = NULL;
			return VOD_OK;
		}

		state->source_count++;

		// estimate the output frame count: the speed-adjusted frame count
		// of the longest source drives the estimate
		cur_frame_count = ((uint64_t)audio_track->frame_count * speed_denom) / speed_nom;
		if (state->output_frame_count < cur_frame_count)
		{
			state->output_frame_count = cur_frame_count;
		}
		return VOD_OK;

	case MEDIA_CLIP_RATE_FILTER:
		// compound the rate into the accumulated speed numerator
		rate_filter = vod_container_of(clip, media_clip_rate_filter_t, base);
		speed_nom = ((uint64_t)speed_nom * rate_filter->rate.nom) / rate_filter->rate.denom;
		break;

	default:;
	}

	// recursively prepare the child sources
	source_count = 0;

	sources_end = clip->sources + clip->source_count;
	for (sources_cur = clip->sources; sources_cur < sources_end; sources_cur++)
	{
		rc = audio_filter_walk_filters_prepare_init(state, sources_cur, speed_nom, speed_denom);
		if (rc != VOD_OK)
		{
			return rc;
		}

		// children pruned by the recursion are left as NULL
		if (*sources_cur != NULL)
		{
			source_count++;
			last_source = *sources_cur;
		}
	}

	// skip the current filter when it's not needed
	switch (source_count)
	{
	case 0:
		// no children left - prune this clip as well
		*clip_ptr = NULL;
		return VOD_OK;

	case 1:
		switch (clip->type)
		{
		case MEDIA_CLIP_MIX_FILTER:
		case MEDIA_CLIP_CONCAT_FILTER:
			// in case of mixing a single clip or concat, skip the filter
			*clip_ptr = last_source;
			return VOD_OK;

		default:;
		}
		break;
	}

	// update the graph description size
	state->graph_desc_size += clip->audio_filter->get_filter_desc_size(clip) + 1;	// 1 = ';'

	return VOD_OK;
}