Example #1
static void
recover_pts(tsfix_t *tf, tfstream_t *tfs, th_pkt_t *pkt)
{
  th_pktref_t *pr, *srch;

  pktref_enqueue(&tf->tf_ptsq, pkt);

  while((pr = TAILQ_FIRST(&tf->tf_ptsq)) != NULL) {
    
    /* Resolve queued packets head-first; stop if a PTS cannot be recovered yet */
    pkt = pr->pr_pkt;
    tfs = tfs_find(tf, pkt);

    switch(tfs->tfs_type) {

    case SCT_MPEG2VIDEO:

      switch(pkt->pkt_frametype) {
      case PKT_B_FRAME:
	/* B-frames have same PTS as DTS, pass them on */
	pkt->pkt_pts = pkt->pkt_dts;
	tsfixprintf("TSFIX: %-12s PTS b-frame set to %"PRId64"\n",
		    streaming_component_type2txt(tfs->tfs_type),
		    pkt->pkt_dts);
	break;
      
      case PKT_I_FRAME:
      case PKT_P_FRAME:
	/* Presentation occurs at the DTS of the next I or P frame;
	   try to find it */
	srch = TAILQ_NEXT(pr, pr_link);
	while(1) {
	  if(srch == NULL)
	    return; /* not arrived yet, wait */
	  if(tfs_find(tf, srch->pr_pkt) == tfs && 
	     srch->pr_pkt->pkt_frametype <= PKT_P_FRAME) {
	    pkt->pkt_pts = srch->pr_pkt->pkt_dts;
	    tsfixprintf("TSFIX: %-12s PTS *-frame set to %"PRId64"\n",
			streaming_component_type2txt(tfs->tfs_type),
			pkt->pkt_pts);
	    break;
	  }
	  srch = TAILQ_NEXT(srch, pr_link);
	}
	break;
      }
      break;

    default:
      break;
    }

    TAILQ_REMOVE(&tf->tf_ptsq, pr, pr_link);
    normalize_ts(tf, tfs, pkt);
    free(pr);
  }
}
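
The example queues each packet and recovers MPEG-2 presentation timestamps: a B-frame is presented at its own DTS, while an I- or P-frame is presented at the DTS of the next I or P frame in decode order, so the function waits until that reference frame has arrived before releasing the packet. The standalone sketch below illustrates the same reordering rule outside of Tvheadend; the frame-type constants, the frame_t struct and the sample GOP are invented for the illustration (only the I < P < B ordering mirrors the "<= PKT_P_FRAME" test above).

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Frame-type codes invented for this sketch; only the ordering
 * I < P < B mirrors the "<= PKT_P_FRAME" test in the example. */
enum { FRAME_I = 1, FRAME_P = 2, FRAME_B = 3 };

typedef struct {
  int     type;
  int64_t dts;
  int64_t pts;   /* recovered below, -1 while unknown */
} frame_t;

/* MPEG-2 PTS recovery in decode order: B-frames present at their own DTS,
 * I/P-frames present at the DTS of the next I or P frame (if it has arrived). */
static void recover_pts_demo(frame_t *f, int n)
{
  for (int i = 0; i < n; i++) {
    if (f[i].type == FRAME_B) {
      f[i].pts = f[i].dts;
      continue;
    }
    f[i].pts = -1;                  /* next reference frame not seen yet */
    for (int j = i + 1; j < n; j++) {
      if (f[j].type != FRAME_B) {   /* next I or P frame in decode order */
        f[i].pts = f[j].dts;
        break;
      }
    }
  }
}

int main(void)
{
  /* Decode order I P B B P B, 90 kHz ticks, 40 ms per frame */
  frame_t f[] = {
    { FRAME_I, 0, 0 }, { FRAME_P, 3600, 0 }, { FRAME_B, 7200, 0 },
    { FRAME_B, 10800, 0 }, { FRAME_P, 14400, 0 }, { FRAME_B, 18000, 0 },
  };
  int n = (int)(sizeof(f) / sizeof(f[0]));

  recover_pts_demo(f, n);
  for (int i = 0; i < n; i++)
    printf("frame %d  type %d  dts %" PRId64 "  pts %" PRId64 "\n",
           i, f[i].type, f[i].dts, f[i].pts);
  return 0;
}

Running the sketch prints the recovered PTS for each frame; the final P-frame keeps pts -1 because its reference frame has not arrived, which is exactly the case where the real function returns and leaves the packet queued.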
Example #2
static void
tsfix_input_packet(tsfix_t *tf, streaming_message_t *sm)
{
  th_pkt_t *pkt = pkt_copy_shallow(sm->sm_data);
  tfstream_t *tfs = tfs_find(tf, pkt);
  streaming_msg_free(sm);
  
  if(tfs == NULL || dispatch_clock < tf->tf_start_time) {
    pkt_ref_dec(pkt);
    return;
  }

  if(tf->tf_tsref == PTS_UNSET &&
     (!tf->tf_hasvideo ||
      (SCT_ISVIDEO(tfs->tfs_type) && pkt->pkt_frametype == PKT_I_FRAME))) {
    tf->tf_tsref = pkt->pkt_dts & PTS_MASK;
    tsfixprintf("reference clock set to %"PRId64"\n", tf->tf_tsref);
  }

  /* No DTS in the packet: extrapolate it from the previous DTS and duration */
  if(pkt->pkt_dts == PTS_UNSET) {

    int pdur = pkt->pkt_duration >> pkt->pkt_field;

    if(tfs->tfs_last_dts_in == PTS_UNSET) {
      pkt_ref_dec(pkt);
      return;
    }

    pkt->pkt_dts = (tfs->tfs_last_dts_in + pdur) & PTS_MASK;

    tsfixprintf("TSFIX: %-12s DTS set to last %"PRId64" +%d == %"PRId64"\n",
		streaming_component_type2txt(tfs->tfs_type),
		tfs->tfs_last_dts_in, pdur, pkt->pkt_dts);
  }
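
When a packet arrives with no DTS at all, the snippet synthesizes one from the previous packet's DTS plus the packet duration, halving the duration for field-based packets (the pkt_field shift) and wrapping the result with PTS_MASK. Below is a minimal sketch of that extrapolation, assuming the mask is the standard 33-bit MPEG timestamp mask; the actual PTS_MASK definition is not shown in the snippet.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Assumption: 33-bit MPEG PTS/DTS wrap mask, standing in for PTS_MASK. */
#define TS_MASK ((INT64_C(1) << 33) - 1)

/* Extrapolate a missing DTS: previous DTS plus packet duration,
 * halved for field-based packets (field == 1), wrapped at 33 bits. */
static int64_t extrapolate_dts(int64_t last_dts, int64_t duration, int field)
{
  int64_t pdur = duration >> field;
  return (last_dts + pdur) & TS_MASK;
}

int main(void)
{
  /* Normal case: 40 ms frame (3600 ticks at 90 kHz) after DTS 90000 -> 93600 */
  printf("%" PRId64 "\n", extrapolate_dts(90000, 3600, 0));

  /* Wrap case: the 33-bit counter rolls over -> 2599 */
  printf("%" PRId64 "\n", extrapolate_dts(TS_MASK - 1000, 3600, 0));
  return 0;
}

The wrap handling matters because MPEG transport-stream timestamps are 33-bit counters at 90 kHz, so they roll over roughly every 26.5 hours.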
Example #3
static void
tsfix_input_packet(tsfix_t *tf, streaming_message_t *sm)
{
  th_pkt_t *pkt = pkt_copy_shallow(sm->sm_data);
  tfstream_t *tfs = tfs_find(tf, pkt);
  streaming_msg_free(sm);
  
  if(tfs == NULL || dispatch_clock < tf->tf_start_time) {
    pkt_ref_dec(pkt);
    return;
  }

  if(tf->tf_tsref == PTS_UNSET &&
     (!tf->tf_hasvideo ||
      (SCT_ISVIDEO(tfs->tfs_type) && pkt->pkt_frametype == PKT_I_FRAME))) {
    tf->tf_tsref = pkt->pkt_dts & PTS_MASK;
    tsfixprintf("reference clock set to %"PRId64"\n", tf->tf_tsref);
  } else {
    /* For teletext, the encoders might use completely different timestamps */
    /* If the difference is greater than 2 seconds, use the actual dts value */
    if (tfs->tfs_type == SCT_TELETEXT && tfs->tfs_local_ref == PTS_UNSET &&
        tf->tf_tsref != PTS_UNSET && pkt->pkt_dts != PTS_UNSET) {
      int64_t diff = tsfix_ts_diff(tf->tf_tsref, pkt->pkt_dts);
      if (diff > 2 * 90000) {
        tfstream_t *tfs2;
        tvhwarn("parser", "The timediff for TELETEXT is big (%"PRId64"), using current dts", diff);
        tfs->tfs_local_ref = pkt->pkt_dts;
        /* Text subtitles extracted from teletext have same timebase */
        LIST_FOREACH(tfs2, &tf->tf_streams, tfs_link)
          if(tfs2->tfs_type == SCT_TEXTSUB)
            tfs2->tfs_local_ref = pkt->pkt_dts;
      } else {