/* Example #1 */
/* 0 */
/**
 * Deliver a decoded frame to the output callback.
 *
 * Clears the skip flag, publishes the frame's time/epoch to the media
 * pipe clock, hands the frame to the registered delivery callback and
 * finally lets the external-subtitle scanner advance to this timestamp.
 */
void
video_deliver_frame(video_decoder_t *vd, frame_buffer_type_t type, void *frame,
		    const frame_info_t *info)
{
  /* A frame made it through the decoder; any pending skip is over. */
  vd->vd_skip = 0;

  /* Publish this frame's presentation time to the media pipe. */
  mp_set_current_time(vd->vd_mp, info->fi_time, info->fi_epoch);

  /* Hand the frame to whoever is consuming decoded output... */
  vd->vd_frame_deliver(type, frame, info, vd->vd_opaque);

  /* ...then catch external subtitles up to this point in time. */
  video_decoder_scan_ext_sub(vd, info->fi_time);
}
/* Example #2 */
/* 0 */
/**
 * Deliver a decoded frame to the output callback.
 *
 * Optionally posts an EVENT_CURRENT_PTS event carrying the frame's
 * timestamp, then hands the frame to the registered delivery callback
 * and lets the external-subtitle scanner advance to this timestamp.
 */
void
video_deliver_frame(video_decoder_t *vd, frame_buffer_type_t type, void *frame,
		    const frame_info_t *info, int send_pts)
{
  /* A frame made it through the decoder; any pending skip is over. */
  vd->vd_skip = 0;

  /* Tell the media pipe about the current presentation timestamp,
     unless the caller suppressed it or the frame carries no PTS. */
  if(send_pts && info->pts != AV_NOPTS_VALUE) {
    event_ts_t *pts_event =
      event_create(EVENT_CURRENT_PTS, sizeof(event_ts_t));
    pts_event->ts = info->pts;
    mp_enqueue_event(vd->vd_mp, &pts_event->h);
    event_release(&pts_event->h);
  }

  /* Hand the frame to whoever is consuming decoded output... */
  vd->vd_frame_deliver(type, frame, info, vd->vd_opaque);

  /* ...then catch external subtitles up to this point in time. */
  video_decoder_scan_ext_sub(vd, info->pts);
}
/* Example #3 */
/* 0 */
/**
 * Deliver a decoded video frame downstream.
 *
 * Computes the frame's display aspect ratio, reconstructs a usable
 * PTS and duration from the (possibly incomplete) timestamps in @mb,
 * optionally posts an EVENT_CURRENT_PTS event, and finally hands the
 * frame to the registered delivery callback and the external-subtitle
 * scanner.  Frames whose duration cannot be established are dropped.
 *
 * Fix: `fi` was previously left uninitialized, and the aspect-override
 * switch had no default case — an unexpected mb_aspect_override value
 * left fi.dar (and every unset frame_info_t field) indeterminate, which
 * is undefined behavior when read downstream.  `fi` is now zero-
 * initialized and unknown override values fall back to the stream
 * geometry (the case-0 path).
 */
void
video_deliver_frame(video_decoder_t *vd,
		    media_pipe_t *mp, media_queue_t *mq,
		    AVCodecContext *ctx, AVFrame *frame,
		    const media_buf_t *mb, int decode_time)
{
  event_ts_t *ets;
  frame_info_t fi = {0};  /* zero-init: fields not set below stay defined */

  if(mb->mb_time != AV_NOPTS_VALUE)
    mp_set_current_time(mp, mb->mb_time);

  /* Compute aspect ratio */
  switch(mb->mb_aspect_override) {
  default:  /* unexpected override value: fall back to stream geometry */
    /* FALLTHROUGH */
  case 0:

    if(frame->pan_scan != NULL && frame->pan_scan->width != 0) {
      fi.dar.num = frame->pan_scan->width;
      fi.dar.den = frame->pan_scan->height;
    } else {
      fi.dar.num = ctx->width;
      fi.dar.den = ctx->height;
    }

    if(ctx->sample_aspect_ratio.num)
      fi.dar = av_mul_q(fi.dar, ctx->sample_aspect_ratio);
    break;
  case 1:
    fi.dar = (AVRational){4,3};
    break;
  case 2:
    fi.dar = (AVRational){16,9};
    break;
  }

  int64_t pts = mb->mb_pts;

  /* Compute duration and PTS of frame */
  if(pts == AV_NOPTS_VALUE && mb->mb_dts != AV_NOPTS_VALUE &&
     (ctx->has_b_frames == 0 || frame->pict_type == FF_B_TYPE)) {
    /* Without B-frame reordering (or for B-frames themselves) the
       decode timestamp can stand in for the presentation timestamp. */
    pts = mb->mb_dts;
  }

  int duration = mb->mb_duration;

  if(!vd_valid_duration(duration)) {
    /* duration is zero or very invalid, use duration from last output */
    duration = vd->vd_estimated_duration;
  }

  if(pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE)
    pts = vd->vd_nextpts; /* no pts set, use estimated pts */

  if(pts != AV_NOPTS_VALUE && vd->vd_prevpts != AV_NOPTS_VALUE) {
    /* we know PTS of a prior frame */
    int64_t t = (pts - vd->vd_prevpts) / vd->vd_prevpts_cnt;

    if(vd_valid_duration(t)) {
      /* inter frame duration seems valid, store it */
      vd->vd_estimated_duration = t;
      if(duration == 0)
	duration = t;

    } else if(t < 0 || t > 10000000LL) {
      /* PTS discontinuity, use estimated PTS from last output instead */
      pts = vd->vd_nextpts;
    }
  }

  /* Each repeated field extends display time by half a frame. */
  duration += frame->repeat_pict * duration / 2;

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_prevpts = pts;
    vd->vd_prevpts_cnt = 0;
  }
  vd->vd_prevpts_cnt++;

  if(duration == 0) {
    TRACE(TRACE_DEBUG, "Video", "Dropping frame with duration = 0");
    return;
  }

  /* Flag the UI when decoding is slower than the frame's display time. */
  prop_set_int(mq->mq_prop_too_slow, decode_time > duration);

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_nextpts = pts + duration;

    if(mb->mb_send_pts) {
      ets = event_create(EVENT_CURRENT_PTS, sizeof(event_ts_t));
      ets->ts = pts;
      mp_enqueue_event(mp, &ets->h);
      event_release(&ets->h);
    }

  } else {
    vd->vd_nextpts = AV_NOPTS_VALUE;
  }

  /* Interlaced mode is sticky once seen, unless deinterlacing is
     disabled for this buffer. */
  vd->vd_interlaced |=
    frame->interlaced_frame && !mb->mb_disable_deinterlacer;

  fi.width = ctx->width;
  fi.height = ctx->height;
  fi.pix_fmt = ctx->pix_fmt;
  fi.pts = pts;
  fi.epoch = mb->mb_epoch;
  fi.duration = duration;

  fi.interlaced = !!vd->vd_interlaced;
  fi.tff = !!frame->top_field_first;
  fi.prescaled = 0;

  fi.color_space = ctx->colorspace;
  fi.color_range = ctx->color_range;

  vd->vd_frame_deliver(frame->data, frame->linesize, &fi, vd->vd_opaque);

  video_decoder_scan_ext_sub(vd, fi.pts);
}