/**
 * Hand a fully decoded picture (RSX path) to the video output stage.
 *
 * Fills in missing PTS from the decoder's running next-PTS estimate and
 * advances that estimate for the following frame.  Ownership of the RSX
 * memory backing the picture transfers to the output on success; if the
 * frame is rejected we must free it here.
 */
static void emit_frame(video_decoder_t *vd, vdec_pic_t *vp) {
  vd->vd_estimated_duration = vp->fi.fi_duration; // For bitrate calculations

  // No PTS on this frame: extrapolate from the previous frame's PTS+duration
  if(vp->fi.fi_pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE)
    vp->fi.fi_pts = vd->vd_nextpts;

  // Advance the PTS estimate for the next frame
  if(vp->fi.fi_pts != AV_NOPTS_VALUE)
    vd->vd_nextpts = vp->fi.fi_pts + vp->fi.fi_duration;

#if VDEC_DETAILED_DEBUG
  // Static so we can log the delta between consecutive displayed PTS values
  static int64_t lastpts;
  TRACE(TRACE_DEBUG, "VDEC DPY",
        "Displaying 0x%llx (%lld) d:%lld dur=%d %d x %d",
        vp->order, vp->fi.fi_pts, vp->fi.fi_pts - lastpts,
        vp->fi.fi_duration, vp->fi.fi_width, vp->fi.fi_height);
  lastpts = vp->fi.fi_pts;
#endif

  vp->fi.fi_type = 'RSX'; // Tag buffer type so the renderer picks the RSX path

  if(video_deliver_frame(vd, &vp->fi) == -1)
    // Frame not accepted, free it
    rsx_free(vp->vp_offset[0], vp->vp_size);

#if VDEC_DETAILED_DEBUG
  TRACE(TRACE_DEBUG, "VDEC DPY", "Frame delivered");
#endif
}
/**
 * Hand a fully decoded picture to the video output stage.
 *
 * Variant without explicit rejection handling: the frame is delivered
 * unconditionally (its buffer lifetime is presumably managed by the
 * output side — NOTE(review): confirm against video_deliver_frame()).
 * PTS gaps are filled from the decoder's running next-PTS estimate.
 */
static void emit_frame(video_decoder_t *vd, vdec_pic_t *vp) {
  vd->vd_estimated_duration = vp->fi.fi_duration; // For bitrate calculations

  // No PTS on this frame: extrapolate from the previous frame's PTS+duration
  if(vp->fi.fi_pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE)
    vp->fi.fi_pts = vd->vd_nextpts;

  // Advance the PTS estimate for the next frame
  if(vp->fi.fi_pts != AV_NOPTS_VALUE)
    vd->vd_nextpts = vp->fi.fi_pts + vp->fi.fi_duration;

#if VDEC_DETAILED_DEBUG
  // Static so we can log the delta between consecutive displayed PTS values
  static int64_t lastpts;
  TRACE(TRACE_DEBUG, "VDEC DPY",
        "Displaying 0x%llx (%lld) d:%lld dur=%d",
        vp->order, vp->fi.fi_pts, vp->fi.fi_pts - lastpts,
        vp->fi.fi_duration);
  lastpts = vp->fi.fi_pts;
#endif

  vp->fi.fi_type = 'RSX'; // Tag buffer type so the renderer picks the RSX path

  video_deliver_frame(vd, &vp->fi);

#if VDEC_DETAILED_DEBUG
  TRACE(TRACE_DEBUG, "VDEC DPY", "Frame delivered");
#endif
}
/**
 * Decode one compressed video buffer via libavcodec and, if a picture
 * comes out, pass it on to video_deliver_frame().
 *
 * Handles the flush protocol (drain the codec with empty packets after a
 * seek), per-packet buffer-callback wiring via ctx->opaque, and decode-time
 * statistics for the UI.
 */
static void vd_decode_video(video_decoder_t *vd, media_queue_t *mq, media_buf_t *mb) {
  int got_pic = 0;
  media_pipe_t *mp = vd->vd_mp;
  media_codec_t *cw = mb->mb_cw;
  AVCodecContext *ctx = cw->codec_ctx;
  AVFrame *frame = vd->vd_frame;
  int t;

  if(vd->vd_do_flush) {
    // Drain all buffered pictures out of the codec with empty packets
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = NULL;
    avpkt.size = 0;
    do {
      avcodec_decode_video2(ctx, frame, &got_pic, &avpkt);
    } while(got_pic);

    // Reset all PTS tracking state; timestamps restart after a seek
    vd->vd_do_flush = 0;
    vd->vd_prevpts = AV_NOPTS_VALUE;
    vd->vd_nextpts = AV_NOPTS_VALUE;
    vd->vd_estimated_duration = 0;
    avcodec_flush_buffers(ctx);
    vd->vd_compensate_thres = 5;
  }

  // Stash the media buffer so our get/release buffer callbacks can see it
  ctx->opaque = mb;
  ctx->get_buffer = vd_get_buffer;
  ctx->release_buffer = vd_release_buffer;

  /*
   * If we are seeking, drop any non-reference frames
   */
  ctx->skip_frame = mb->mb_skip == 1 ? AVDISCARD_NONREF : AVDISCARD_DEFAULT;

  avgtime_start(&vd->vd_decode_time);

  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = mb->mb_data;
  avpkt.size = mb->mb_size;

  avcodec_decode_video2(ctx, frame, &got_pic, &avpkt);

  // Average/peak decode time, exported to the stats properties
  t = avgtime_stop(&vd->vd_decode_time, mq->mq_prop_decode_avg,
                   mq->mq_prop_decode_peak);

  if(mp->mp_stats)
    mp_set_mq_meta(mq, cw->codec, cw->codec_ctx);

  // The emitted frame may belong to an earlier packet (codec delay);
  // recover the media buffer attached when that frame's buffer was allocated
  mb = frame->opaque;

  if(got_pic == 0 || mb->mb_skip == 1)
    return;

  vd->vd_skip = 0;
  video_deliver_frame(vd, mp, mq, ctx, frame, mb, t);
}
/**
 * Deliver one VDA-decoded frame (CoreVideo pixel buffer) to the video
 * output stage.
 *
 * The pixel buffer is locked for the whole delivery: plane pointers read
 * out of it are only valid while the lock is held.  Frame duration is
 * taken from the container when plausible, otherwise from a running
 * estimate derived from consecutive PTS deltas.
 */
static void emit_frame(vda_decoder_t *vdad, vda_frame_t *vf, media_queue_t *mq) {
  int i;
  CGSize siz;

  frame_info_t fi;
  memset(&fi, 0, sizeof(fi));

  // Lock the buffer so the plane base addresses below stay valid
  CVPixelBufferLockBaseAddress(vf->vf_buf, 0);

  for(i = 0; i < 3; i++ ) {
    fi.fi_data[i]  = CVPixelBufferGetBaseAddressOfPlane(vf->vf_buf, i);
    fi.fi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(vf->vf_buf, i);
  }

  // Update duration estimate from the PTS delta, but only if it is sane
  // (between 1ms and 1s, in microseconds)
  if(vdad->vdad_last_pts != PTS_UNSET && vf->vf_pts != PTS_UNSET) {
    int64_t d = vf->vf_pts - vdad->vdad_last_pts;
    if(d > 1000 && d < 1000000)
      vdad->vdad_estimated_duration = d;
  }

  siz = CVImageBufferGetEncodedSize(vf->vf_buf);
  fi.fi_type = 'YUVP';   // Planar YUV output
  fi.fi_width = siz.width;
  fi.fi_height = siz.height;
  // Trust the demuxer duration only if > 10ms; otherwise use our estimate
  fi.fi_duration = vf->vf_duration > 10000 ? vf->vf_duration : vdad->vdad_estimated_duration;

  // Display size carries the aspect ratio (may differ from encoded size)
  siz = CVImageBufferGetDisplaySize(vf->vf_buf);
  fi.fi_dar_num = siz.width;
  fi.fi_dar_den = siz.height;

  fi.fi_pts = vf->vf_pts;
  fi.fi_color_space = -1;  // Unknown, let the renderer pick a default
  fi.fi_epoch = vf->vf_epoch;
  fi.fi_drive_clock = 1;
  fi.fi_vshift = 1;        // 4:2:0 chroma subsampling
  fi.fi_hshift = 1;

  video_decoder_t *vd = vdad->vdad_vd;
  vd->vd_estimated_duration = fi.fi_duration; // For bitrate calculations

  // A zero/unknown duration frame cannot be scheduled; drop it
  if(fi.fi_duration > 0)
    video_deliver_frame(vd, &fi);

  CVPixelBufferUnlockBaseAddress(vf->vf_buf, 0);
  vdad->vdad_last_pts = vf->vf_pts;

  // Publish the codec description shown in the UI
  char fmt[64];
  snprintf(fmt, sizeof(fmt), "h264 (VDA) %d x %d", fi.fi_width, fi.fi_height);
  prop_set_string(mq->mq_prop_codec, fmt);
}
/**
 * Decode one compressed buffer through the VDPAU-accelerated libav codec
 * and deliver the resulting hardware surface.
 *
 * Mirrors the software decode path: drains the codec on flush, resets PTS
 * tracking after a seek, and honours the skip flags used while seeking.
 * Timing information travels with the VDPAU video surface (frame->opaque).
 */
static void vdpau_decode(struct media_codec *mc, struct video_decoder *vd, struct media_queue *mq, struct media_buf *mb, int reqsize) {
  media_codec_t *cw = mb->mb_cw;
  AVCodecContext *ctx = cw->codec_ctx;
  vdpau_codec_t *vc = mc->opaque;
  media_pipe_t *mp = vd->vd_mp;
  vdpau_video_surface_t *vvs;
  int got_pic = 0;
  AVFrame *frame = vd->vd_frame;

  if(vd->vd_do_flush) {
    // Drain all buffered pictures out of the codec with empty packets
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = NULL;
    avpkt.size = 0;
    do {
      avcodec_decode_video2(ctx, frame, &got_pic, &avpkt);
    } while(got_pic);

    // Reset all PTS tracking state; timestamps restart after a seek
    vd->vd_do_flush = 0;
    vd->vd_prevpts = AV_NOPTS_VALUE;
    vd->vd_nextpts = AV_NOPTS_VALUE;
    vd->vd_estimated_duration = 0;
    avcodec_flush_buffers(ctx);
    vd->vd_compensate_thres = 5;
  }

  // While seeking (mb_skip == 1) drop non-reference frames
  ctx->skip_frame = mb->mb_skip == 1 ? AVDISCARD_NONREF : AVDISCARD_NONE;
  if(mb->mb_skip == 2)
    vd->vd_skip = 1;

  // Make the current buffer visible to the VDPAU surface allocator
  vc->vc_mb = mb;

  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = mb->mb_data;
  avpkt.size = mb->mb_size;

  avcodec_decode_video2(ctx, frame, &got_pic, &avpkt);

  if(mp->mp_stats)
    mp_set_mq_meta(mq, cw->codec, cw->codec_ctx);

  if(!got_pic || mb->mb_skip == 1)
    return;

  vd->vd_skip = 0;
  // Timing metadata was attached to the surface when it was allocated
  vvs = frame->opaque;

  video_deliver_frame(vd, vd->vd_mp, mq, mb, ctx, frame,
                      vvs->vvs_time, vvs->vvs_pts, vvs->vvs_dts,
                      vvs->vvs_duration, vvs->vvs_epoch, 0);
}
/**
 * Post-process a picture decoded by libavcodec and deliver it.
 *
 * Responsibilities, in order:
 *  - update the pipe's current playback time from the buffer's wall time
 *  - compute display aspect ratio (pan-scan / container override / SAR)
 *  - reconstruct a usable PTS: fall back to DTS when safe, then to the
 *    running next-PTS estimate; detect PTS discontinuities
 *  - estimate frame duration from inter-frame PTS deltas when the
 *    container supplies none; drop frames whose duration stays 0
 *  - track interlacing and fill in the frame_info_t for delivery
 *
 * Statement order matters: the estimator state (vd_prevpts,
 * vd_prevpts_cnt, vd_estimated_duration, vd_nextpts) is read and written
 * incrementally through the function.
 */
void video_deliver_frame_avctx(video_decoder_t *vd, media_pipe_t *mp, media_queue_t *mq, AVCodecContext *ctx, AVFrame *frame, const media_buf_t *mb, int decode_time) {
  frame_info_t fi;

  // Propagate stream wall-clock time to the UI/seek bar
  if(mb->mb_time != AV_NOPTS_VALUE)
    mp_set_current_time(mp, mb->mb_time);

  /* Compute aspect ratio */
  switch(mb->mb_aspect_override) {
  case 0:
    // No override: prefer pan&scan rectangle, else coded size,
    // then correct by the sample aspect ratio
    if(frame->pan_scan != NULL && frame->pan_scan->width != 0) {
      fi.dar.num = frame->pan_scan->width;
      fi.dar.den = frame->pan_scan->height;
    } else {
      fi.dar.num = ctx->width;
      fi.dar.den = ctx->height;
    }
    if(ctx->sample_aspect_ratio.num)
      fi.dar = av_mul_q(fi.dar, ctx->sample_aspect_ratio);
    break;
  case 1:
    fi.dar = (AVRational){4,3};
    break;
  case 2:
    fi.dar = (AVRational){16,9};
    break;
  }

  int64_t pts = mb->mb_pts;

  /* Compute duration and PTS of frame */
  // DTS is a safe PTS substitute when the stream has no B-frames
  // (or for the B-frames themselves, which are emitted in decode order)
  if(pts == AV_NOPTS_VALUE && mb->mb_dts != AV_NOPTS_VALUE &&
     (ctx->has_b_frames == 0 || frame->pict_type == FF_B_TYPE)) {
    pts = mb->mb_dts;
  }

  int duration = mb->mb_duration;

  if(!vd_valid_duration(duration)) {
    /* duration is zero or very invalid, use duration from last output */
    duration = vd->vd_estimated_duration;
  }

  if(pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE)
    pts = vd->vd_nextpts; /* no pts set, use estimated pts */

  if(pts != AV_NOPTS_VALUE && vd->vd_prevpts != AV_NOPTS_VALUE) {
    /* we know PTS of a prior frame */
    // Divide by the number of frames since the last known PTS to get
    // a per-frame delta
    int64_t t = (pts - vd->vd_prevpts) / vd->vd_prevpts_cnt;

    if(vd_valid_duration(t)) {
      /* inter frame duration seems valid, store it */
      vd->vd_estimated_duration = t;
      if(duration == 0)
        duration = t;
    } else if(t < 0 || t > 10000000LL) {
      /* PTS discontinuity, use estimated PTS from last output instead */
      pts = vd->vd_nextpts;
    }
  }

  // Telecine/soft-pulldown: repeat_pict extends display time in half-frames
  duration += frame->repeat_pict * duration / 2;

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_prevpts = pts;
    vd->vd_prevpts_cnt = 0;
  }
  vd->vd_prevpts_cnt++;

  if(duration == 0) {
    TRACE(TRACE_DEBUG, "Video", "Dropping frame with duration = 0");
    return;
  }

  // UI hint: decoding slower than real time
  prop_set_int(mq->mq_prop_too_slow, decode_time > duration);

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_nextpts = pts + duration;
  } else {
    vd->vd_nextpts = AV_NOPTS_VALUE;
  }

  // Sticky: once interlaced content has been seen, keep deinterlacing
  vd->vd_interlaced |= frame->interlaced_frame && !mb->mb_disable_deinterlacer;

  fi.width = ctx->width;
  fi.height = ctx->height;
  fi.pix_fmt = ctx->pix_fmt;
  fi.pts = pts;
  fi.epoch = mb->mb_epoch;
  fi.duration = duration;

  fi.interlaced = !!vd->vd_interlaced;
  fi.tff = !!frame->top_field_first;
  fi.prescaled = 0;

  fi.color_space = ctx->colorspace;
  fi.color_range = ctx->color_range;

  video_deliver_frame(vd, FRAME_BUFFER_TYPE_LIBAV_FRAME, frame, &fi, mb->mb_send_pts);
}
/**
 * Deliver one VideoToolbox-decoded frame to the video output stage.
 *
 * Dispatches on the negotiated CoreVideo pixel format: planar YUV frames
 * are delivered by plane pointers (buffer locked for the duration of the
 * delivery), while biplanar frames are handed over as an opaque
 * CVPixelBuffer reference ('CVPB') for zero-copy rendering.
 */
static void emit_frame(vtb_decoder_t *vtbd, vtb_frame_t *vf, media_queue_t *mq) {
  CGSize siz;

  frame_info_t fi;
  memset(&fi, 0, sizeof(fi));

  // Update duration estimate from the PTS delta, but only if it is sane
  // (between 1ms and 1s, in microseconds)
  if(vtbd->vtbd_last_pts != PTS_UNSET && vf->vf_mbm.mbm_pts != PTS_UNSET) {
    int64_t d = vf->vf_mbm.mbm_pts - vtbd->vtbd_last_pts;
    if(d > 1000 && d < 1000000)
      vtbd->vtbd_estimated_duration = d;
  }

  // Display size carries the aspect ratio (may differ from encoded size)
  siz = CVImageBufferGetDisplaySize(vf->vf_buf);
  fi.fi_dar_num = siz.width;
  fi.fi_dar_den = siz.height;

  fi.fi_pts = vf->vf_mbm.mbm_pts;
  fi.fi_color_space = -1;  // Unknown, let the renderer pick a default
  fi.fi_epoch = vf->vf_mbm.mbm_epoch;
  fi.fi_drive_clock = vf->vf_mbm.mbm_drive_clock;
  fi.fi_user_time = vf->vf_mbm.mbm_user_time;
  fi.fi_vshift = 1;        // 4:2:0 chroma subsampling
  fi.fi_hshift = 1;
  // Trust the demuxer duration only if > 10ms; otherwise use our estimate
  fi.fi_duration = vf->vf_mbm.mbm_duration > 10000 ? vf->vf_mbm.mbm_duration : vtbd->vtbd_estimated_duration;

  siz = CVImageBufferGetEncodedSize(vf->vf_buf);
  fi.fi_width = siz.width;
  fi.fi_height = siz.height;

  video_decoder_t *vd = vtbd->vtbd_vd;
  vd->vd_estimated_duration = fi.fi_duration; // For bitrate calculations

  switch(vtbd->vtbd_pixel_format) {
  case kCVPixelFormatType_420YpCbCr8Planar:
    // Planar YUV: deliver raw plane pointers, valid only while locked
    fi.fi_type = 'YUVP';

    CVPixelBufferLockBaseAddress(vf->vf_buf, 0);

    for(int i = 0; i < 3; i++ ) {
      fi.fi_data[i]  = CVPixelBufferGetBaseAddressOfPlane(vf->vf_buf, i);
      fi.fi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(vf->vf_buf, i);
    }

    // A zero/unknown duration frame cannot be scheduled; drop it
    if(fi.fi_duration > 0)
      video_deliver_frame(vd, &fi);

    CVPixelBufferUnlockBaseAddress(vf->vf_buf, 0);
    break;

  case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
  case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
    // Biplanar: pass the CVPixelBuffer itself for zero-copy display
    fi.fi_type = 'CVPB';
    fi.fi_data[0] = (void *)vf->vf_buf;
    if(fi.fi_duration > 0)
      video_deliver_frame(vd, &fi);
    break;
  }

  vtbd->vtbd_last_pts = vf->vf_mbm.mbm_pts;

  // Publish the codec description shown in the UI
  char fmt[64];
  snprintf(fmt, sizeof(fmt), "h264 (VTB) %d x %d", fi.fi_width, fi.fi_height);
  prop_set_string(mq->mq_prop_codec, fmt);
}
/**
 * Post-process a picture decoded by libavcodec and deliver it, converting
 * the pixel format to YUV420P via swscale when the output refuses the
 * native format.
 *
 * Flow:
 *  1. Compute display aspect ratio (SAR from frame, codec, or override).
 *  2. Reconstruct PTS (helper + running estimate) and estimate duration
 *     from inter-frame PTS deltas; drop frames with duration 0.
 *  3. Try to deliver the frame as-is; if video_deliver_frame() returns 1
 *     the output wants YUV420P, so convert through a cached swscale
 *     context and a cached conversion picture, then deliver again.
 *
 * Statement order matters: the estimator state (vd_prevpts,
 * vd_prevpts_cnt, vd_estimated_duration, vd_nextpts) and the conversion
 * cache (vd_sws, vd_convert_*) are read and written incrementally.
 */
static void libav_deliver_frame(video_decoder_t *vd, media_pipe_t *mp, media_queue_t *mq, AVCodecContext *ctx, AVFrame *frame, const media_buf_meta_t *mbm, int decode_time, const media_codec_t *mc) {
  frame_info_t fi;

  /* Compute aspect ratio */
  switch(mbm->mbm_aspect_override) {
  case 0:
    // No override: coded size corrected by the sample aspect ratio,
    // preferring the frame's SAR over the codec-level fallback
    fi.fi_dar_num = frame->width;
    fi.fi_dar_den = frame->height;

    if(frame->sample_aspect_ratio.num) {
      fi.fi_dar_num *= frame->sample_aspect_ratio.num;
      fi.fi_dar_den *= frame->sample_aspect_ratio.den;
    } else if(mc->sar_num) {
      fi.fi_dar_num *= mc->sar_num;
      fi.fi_dar_den *= mc->sar_den;
    }
    break;
  case 1:
    fi.fi_dar_num = 4;
    fi.fi_dar_den = 3;
    break;
  case 2:
    fi.fi_dar_num = 16;
    fi.fi_dar_den = 9;
    break;
  }

  // Helper handles the DTS-as-PTS fallback (B-frame aware)
  int64_t pts = video_decoder_infer_pts(mbm, vd, frame->pict_type == AV_PICTURE_TYPE_B);

  int duration = mbm->mbm_duration;

  if(!vd_valid_duration(duration)) {
    /* duration is zero or very invalid, use duration from last output */
    duration = vd->vd_estimated_duration;
  }

  if(pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE)
    pts = vd->vd_nextpts; /* no pts set, use estimated pts */

  if(pts != AV_NOPTS_VALUE && vd->vd_prevpts != AV_NOPTS_VALUE) {
    /* we know PTS of a prior frame */
    // Divide by the number of frames since the last known PTS to get
    // a per-frame delta
    int64_t t = (pts - vd->vd_prevpts) / vd->vd_prevpts_cnt;

    if(vd_valid_duration(t)) {
      /* inter frame duration seems valid, store it */
      vd->vd_estimated_duration = t;
      if(duration == 0)
        duration = t;
    }
  }

  // Telecine/soft-pulldown: repeat_pict extends display time in half-frames
  duration += frame->repeat_pict * duration / 2;

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_prevpts = pts;
    vd->vd_prevpts_cnt = 0;
  }
  vd->vd_prevpts_cnt++;

  if(duration == 0) {
    TRACE(TRACE_DEBUG, "Video", "Dropping frame with duration = 0");
    return;
  }

  // UI hint: decoding slower than real time
  prop_set_int(mq->mq_prop_too_slow, decode_time > duration);

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_nextpts = pts + duration;
  } else {
    vd->vd_nextpts = AV_NOPTS_VALUE;
  }

#if 0
  // Debug trap for PTS discontinuities (disabled)
  static int64_t lastpts = AV_NOPTS_VALUE;
  if(lastpts != AV_NOPTS_VALUE) {
    printf("DEC: %20"PRId64" : %-20"PRId64" %d %"PRId64" %6d %d\n",
           pts, pts - lastpts, mbm->mbm_drive_clock, mbm->mbm_delta,
           duration, mbm->mbm_sequence);
    if(pts - lastpts > 1000000) {
      abort();
    }
  }
  lastpts = pts;
#endif

  // Sticky: once interlaced content has been seen, keep deinterlacing
  vd->vd_interlaced |= frame->interlaced_frame && !mbm->mbm_disable_deinterlacer;

  fi.fi_width = frame->width;
  fi.fi_height = frame->height;
  fi.fi_pts = pts;
  fi.fi_epoch = mbm->mbm_epoch;
  fi.fi_delta = mbm->mbm_delta;
  fi.fi_duration = duration;
  fi.fi_drive_clock = mbm->mbm_drive_clock;

  fi.fi_interlaced = !!vd->vd_interlaced;
  fi.fi_tff = !!frame->top_field_first;
  fi.fi_prescaled = 0;

  // Map libav colorspace enum into our own table; 0 = unspecified
  fi.fi_color_space =
    ctx->colorspace < ARRAYSIZE(libav_colorspace_tbl) ?
    libav_colorspace_tbl[ctx->colorspace] : 0;

  fi.fi_type = 'LAVC';

  // Check if we should skip directly to convert code
  // (if the cached converter already matches this frame we know a
  //  previous identical frame needed conversion)
  if(vd->vd_convert_width  != frame->width  ||
     vd->vd_convert_height != frame->height ||
     vd->vd_convert_pixfmt != frame->format) {

    // Nope, go ahead and deliver frame as-is
    fi.fi_data[0] = frame->data[0];
    fi.fi_data[1] = frame->data[1];
    fi.fi_data[2] = frame->data[2];

    fi.fi_pitch[0] = frame->linesize[0];
    fi.fi_pitch[1] = frame->linesize[1];
    fi.fi_pitch[2] = frame->linesize[2];

    fi.fi_pix_fmt = frame->format;
    fi.fi_avframe = frame;

    int r = video_deliver_frame(vd, &fi);

    /* return value
     * 0 = OK
     * 1 = Need convert to YUV420P
     * -1 = Fail
     */

    if(r != 1)
      return;
  }

  // Need to convert frame

  vd->vd_sws =
    sws_getCachedContext(vd->vd_sws,
                         frame->width, frame->height, frame->format,
                         frame->width, frame->height, PIX_FMT_YUV420P,
                         0, NULL, NULL, NULL);

  if(vd->vd_sws == NULL) {
    TRACE(TRACE_ERROR, "Video", "Unable to convert from %s to %s",
          av_get_pix_fmt_name(frame->format),
          av_get_pix_fmt_name(PIX_FMT_YUV420P));
    return;
  }

  // (Re)allocate the cached conversion picture when geometry changes
  if(vd->vd_convert_width  != frame->width  ||
     vd->vd_convert_height != frame->height ||
     vd->vd_convert_pixfmt != frame->format) {
    avpicture_free(&vd->vd_convert);

    vd->vd_convert_width  = frame->width;
    vd->vd_convert_height = frame->height;
    vd->vd_convert_pixfmt = frame->format;

    avpicture_alloc(&vd->vd_convert, PIX_FMT_YUV420P,
                    frame->width, frame->height);

    TRACE(TRACE_DEBUG, "Video", "Converting from %s to %s",
          av_get_pix_fmt_name(frame->format),
          av_get_pix_fmt_name(PIX_FMT_YUV420P));
  }

  sws_scale(vd->vd_sws, (void *)frame->data, frame->linesize, 0,
            frame->height, vd->vd_convert.data, vd->vd_convert.linesize);

  fi.fi_data[0] = vd->vd_convert.data[0];
  fi.fi_data[1] = vd->vd_convert.data[1];
  fi.fi_data[2] = vd->vd_convert.data[2];

  fi.fi_pitch[0] = vd->vd_convert.linesize[0];
  fi.fi_pitch[1] = vd->vd_convert.linesize[1];
  fi.fi_pitch[2] = vd->vd_convert.linesize[2];

  fi.fi_type = 'LAVC';
  fi.fi_pix_fmt = PIX_FMT_YUV420P;
  fi.fi_avframe = NULL;

  video_deliver_frame(vd, &fi);
}