static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux,
	const struct tspp_data_descriptor *tspp_data_desc)
{
	u32 notif_size;
	int i;

	notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
	for (i = 0; i < notif_size; i++)
		dvb_dmx_swfilter_packet(&mpq_demux->demux,
			((u8 *)tspp_data_desc->virt_base) +
			i * TSPP_RAW_TTS_SIZE,
			((u8 *)tspp_data_desc->virt_base) +
			i * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
}
/* Example #2 */
/*
 * Feed a buffer of 188-byte TS packets into the kernel demuxer, grouped
 * into runs of up to 8 packets sharing the same PID.  If the demuxer's
 * callback flags the run for the decoder (Context->provideToDecoder),
 * the same run is handed to writeToDecoder() afterwards.
 *
 * @demux: target software demuxer; its priv holds our DeviceContext_s
 * @buf:   packet buffer, 'count' consecutive 188-byte TS packets
 * @count: number of packets in @buf
 *
 * Grouping outside the demux spinlock works around a scheduling bug
 * caused by waiting while the demux spin is locked (see original note).
 */
void demultiplexDvbPackets(struct dvb_demux* demux, const u8 *buf, int count)
{
  int first = 0;
  int next = 0;
  int cnt = 0;
  int diff_count;
  const u8 *first_buf;
  u16 pid, firstPid;

  struct DeviceContext_s* Context = (struct DeviceContext_s*)demux->priv;

  /* Group the packets by the PIDs and feed them into the kernel demuxer.
     If there is data for the decoder we will be informed via the callback.
     After the demuxer finished its work on the packet block that block is
     fed into the decoder if required.
     This workaround eliminates the scheduling bug caused by waiting while
     the demux spin is locked. */

#if DVB_API_VERSION > 3

  while (count > 0)
  {
    first = next;
    cnt = 0;
    firstPid = ts_pid(&buf[first]);
    while (count > 0)
    {
      count--;
      next += 188;
      cnt++;
      /* Don't peek at buf[next] once the buffer is exhausted:
         that byte is past the end of the caller's data. */
      if (count == 0)
        break;
      pid = ts_pid(&buf[next]);
      if ((pid != firstPid) || (cnt > 8))
          break;
    }
    if ((next - first) > 0)
    {
      /* Abort cleanly if a signal interrupts us; proceeding without
         the lock would race and later unlock an unheld mutex. */
      if (mutex_lock_interruptible(&Context->injectMutex))
        return;

      /* reset the flag (to be set by the callback) */
      Context->provideToDecoder = 0;
      dvb_dmx_swfilter_packets(demux, buf + first, cnt);
      if (Context->provideToDecoder)
      {
        /* the demuxer indicated that the packets are for the decoder */
        writeToDecoder(demux, Context->feedPesType, buf + first, next - first);
      }
      mutex_unlock(&Context->injectMutex);
    }
  }
#else

  firstPid = ts_pid(&buf[first]);
  while(count)
  {
    count--;
    next += 188;
    cnt++;
    /* '!count' is tested before any read of buf[next] so we never
       dereference one packet past the end of the buffer. */
    if(cnt > 8 || !count || ts_pid(&buf[next]) != firstPid || buf[next] != 0x47)
    {
      diff_count = next - first;
      first_buf = buf + first;

      /* Abort cleanly on signal instead of running unlocked. */
      if (mutex_lock_interruptible(&Context->injectMutex))
        return;

      // reset the flag (to be set by the callback //
      Context->provideToDecoder = 0;

      spin_lock(&demux->lock);

      /* NOTE(review): diff_count is a BYTE count; mainline
         dvb_dmx_swfilter_packet() takes no length argument and
         dvb_dmx_swfilter_packets() expects a packet count.  Verify
         this legacy-API signature — this may need to be
         dvb_dmx_swfilter_packets(demux, first_buf, diff_count / 188). */
      dvb_dmx_swfilter_packet(demux, first_buf, diff_count);
      spin_unlock(&demux->lock);

      // the demuxer indicated that the packets are for the decoder //
      if(Context->provideToDecoder)
        writeToDecoder(demux, Context->feedPesType, first_buf, diff_count);

      mutex_unlock(&Context->injectMutex);

      first = next;
      cnt = 0;
      /* Only restart a group if data remains; buf[first] is past the
         end once count has dropped to zero. */
      if (count)
        firstPid = ts_pid(&buf[first]);
    }
  }

#endif
}
/**
 * Demux thread function handling data from specific TSIF.
 *
 * @arg: TSIF number (passed as a casted integer, not a real pointer)
 *
 * Sleeps until data is signalled on the TSIF's wait queue or the thread
 * is asked to stop, then drains all filled TSPP descriptors through
 * either the secure-demux aggregated path or the software demuxer.
 * Returns 0 on normal stop, -ERESTARTSYS if interrupted while taking
 * the filter mutex.
 */
static int mpq_dmx_tspp_thread(void *arg)
{
	/* arg carries a small integer, not an address; round-trip through
	 * unsigned long to avoid a truncating pointer-to-int cast on 64-bit. */
	int tsif = (int)(unsigned long)arg;
	struct mpq_demux *mpq_demux;
	const struct tspp_data_descriptor *tspp_data_desc;
	atomic_t *data_cnt;
	u32 notif_size;
	int channel_id;
	int ref_count;
	int ret;

	do {
		ret = wait_event_interruptible(
			mpq_dmx_tspp_info.tsif[tsif].wait_queue,
			atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) ||
			kthread_should_stop());

		if ((ret < 0) || kthread_should_stop()) {
			MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
			break;
		}

		/* Lock against the TSPP filters data-structure */
		if (mutex_lock_interruptible(
			&mpq_dmx_tspp_info.tsif[tsif].mutex))
			return -ERESTARTSYS;

		channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);

		ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
		data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;

		/* Make sure channel is still active */
		if (ref_count == 0) {
			mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
			continue;
		}

		atomic_dec(data_cnt);

		mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
		mpq_demux->hw_notification_size = 0;

		if (MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC != allocation_mode &&
			mpq_sdmx_is_loaded())
			pr_err_once(
				"%s: TSPP Allocation mode does not support secure demux.\n",
				__func__);

		if (MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC == allocation_mode &&
			mpq_sdmx_is_loaded()) {
			mpq_dmx_tspp_aggregated_process(tsif, channel_id);
		} else {
			/*
			 * Go through all filled descriptors
			 * and perform demuxing on them
			 */
			while ((tspp_data_desc = tspp_get_buffer(0, channel_id))
					!= NULL) {
				notif_size = tspp_data_desc->size /
					TSPP_RAW_TTS_SIZE;
				mpq_demux->hw_notification_size += notif_size;

				/* Reuse the shared helper instead of
				 * duplicating its packet-walk loop here. */
				mpq_dmx_tspp_swfilter_desc(mpq_demux,
					tspp_data_desc);
				/*
				 * Notify TSPP that the buffer
				 * is no longer needed
				 */
				tspp_release_buffer(0, channel_id,
					tspp_data_desc->id);
			}
		}

		/* Track the smallest non-zero notification seen (stats). */
		if (mpq_demux->hw_notification_size &&
			(mpq_demux->hw_notification_size <
			mpq_demux->hw_notification_min_size))
			mpq_demux->hw_notification_min_size =
				mpq_demux->hw_notification_size;

		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
	} while (1);

	return 0;
}