/**
 * Demux TS packets from TSPP by secure-demux.
 * The function assumes the buffer is physically contiguous
 * and that TSPP descriptors are contiguous in memory.
 *
 * @tsif: The TSIF interface to process its packets
 * @channel_id: the TSPP output pipe with the TS packets
 */
static void mpq_dmx_tspp_aggregated_process(int tsif, int channel_id)
{
	const struct tspp_data_descriptor *tspp_data_desc;
	struct mpq_demux *mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
	struct sdmx_buff_descr input;
	size_t aggregate_len = 0;
	size_t aggregate_count = 0;
	phys_addr_t buff_start_addr_phys;
	phys_addr_t buff_current_addr_phys = 0;
	u32 notif_size;
	int i;

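	/* Aggregate all descriptors that TSPP has filled for this channel */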
	while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) {
		if (aggregate_count == 0)
			buff_current_addr_phys = tspp_data_desc->phys_base;
		notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
		mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[aggregate_count] =
			tspp_data_desc->id;
		aggregate_len += tspp_data_desc->size;
		aggregate_count++;
		mpq_demux->hw_notification_size += notif_size;

		/*
		 * Pass the descriptor to the SW filter only if some
		 * feeds are not handled by the secure demux
		 */
		if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
			mpq_dmx_tspp_swfilter_desc(mpq_demux, tspp_data_desc);
	}

	if (!aggregate_count)
		return;

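	/*
	 * The channel's memory heap is one contiguous physical block, so
	 * the aggregated data is fully described by the heap base address,
	 * a read offset and the aggregated length.
	 */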
	buff_start_addr_phys =
		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;

	if (buff_start_addr_phys > 0xFFFFFFFF)
		MPQ_DVB_ERR_PRINT(
			"%s: WARNING - physical address %pa is larger than 32 bits!\n",
			__func__, &buff_start_addr_phys);

	input.base_addr = (void *)(u32)buff_start_addr_phys;
	input.size = mpq_dmx_tspp_info.tsif[tsif].buffer_count *
		TSPP_DESCRIPTOR_SIZE;

	if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
		MPQ_DVB_DBG_PRINT(
			"%s: SDMX Processing %d descriptors: %d bytes at start address 0x%x, read offset %d\n",
			__func__, aggregate_count, aggregate_len,
			(unsigned int)input.base_addr,
			(int)(buff_current_addr_phys - buff_start_addr_phys));

		mpq_sdmx_process(mpq_demux, &input, aggregate_len,
			buff_current_addr_phys - buff_start_addr_phys,
			TSPP_RAW_TTS_SIZE);
	}

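	/* Return the aggregated descriptors back to TSPP */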
	for (i = 0; i < aggregate_count; i++)
		tspp_release_buffer(0, channel_id,
			mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[i]);
}
/**
 * Demux thread function handling data from a specific TSIF.
 *
 * @arg: TSIF number
 */
static int mpq_dmx_tspp_thread(void *arg)
{
	int tsif = (int)(uintptr_t)arg;
	struct mpq_demux *mpq_demux;
	const struct tspp_data_descriptor *tspp_data_desc;
	atomic_t *data_cnt;
	u32 notif_size;
	int channel_id;
	int ref_count;
	int ret;

	do {
		ret = wait_event_interruptible(
			mpq_dmx_tspp_info.tsif[tsif].wait_queue,
			atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) ||
			kthread_should_stop());

		if ((ret < 0) || kthread_should_stop()) {
			MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
			break;
		}

		/* Lock against the TSPP filters data-structure */
		if (mutex_lock_interruptible(
			&mpq_dmx_tspp_info.tsif[tsif].mutex))
			return -ERESTARTSYS;

		channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);

		ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
		data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;

		/* Make sure channel is still active */
		if (ref_count == 0) {
			mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
			continue;
		}

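		/* Consume one pending data notification */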
		atomic_dec(data_cnt);

		mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
		mpq_demux->hw_notification_size = 0;

		if (allocation_mode != MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
			mpq_sdmx_is_loaded())
			pr_err_once(
				"%s: TSPP Allocation mode does not support secure demux.\n",
				__func__);

		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
			mpq_sdmx_is_loaded()) {
			mpq_dmx_tspp_aggregated_process(tsif, channel_id);
		} else {
			/*
			 * Go through all filled descriptors
			 * and perform demuxing on them
			 */
			while ((tspp_data_desc = tspp_get_buffer(0, channel_id))
					!= NULL) {
				notif_size = tspp_data_desc->size /
					TSPP_RAW_TTS_SIZE;
				mpq_demux->hw_notification_size += notif_size;

				mpq_dmx_tspp_swfilter_desc(mpq_demux,
					tspp_data_desc);
				/*
				 * Notify TSPP that the buffer
				 * is no longer needed
				 */
				tspp_release_buffer(0, channel_id,
					tspp_data_desc->id);
			}
		}

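		/* Track the smallest HW notification seen, for statistics */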
		if (mpq_demux->hw_notification_size &&
			(mpq_demux->hw_notification_size <
			mpq_demux->hw_notification_min_size))
			mpq_demux->hw_notification_min_size =
				mpq_demux->hw_notification_size;

		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	} while (1);

	return 0;
}
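
/*
 * Illustrative sketch only, not part of the driver: how a thread running
 * mpq_dmx_tspp_thread() is expected to be started, and how a TSPP data
 * notification would wake it, given the data_cnt/wait_queue protocol the
 * thread implements above.  The function names and the thread-name format
 * here are assumptions; assumes <linux/kthread.h> is included.
 */
#if 0
static struct task_struct *mpq_dmx_tspp_example_start_thread(int tsif)
{
	/*
	 * Pass the TSIF number through the opaque thread argument;
	 * the caller should check the result with IS_ERR().
	 */
	return kthread_run(mpq_dmx_tspp_thread, (void *)(uintptr_t)tsif,
			   "mpq-dmx-tspp-%d", tsif);
}

static void mpq_dmx_tspp_example_notify(int tsif)
{
	/* Account one more pending buffer and wake the demux thread */
	atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].data_cnt);
	wake_up(&mpq_dmx_tspp_info.tsif[tsif].wait_queue);
}
#endif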