static int config_props(AVFilterLink *inlink)
{
    VignetteContext *s = inlink->dst->priv;
    AVRational sar = inlink->sample_aspect_ratio;

    s->desc = av_pix_fmt_desc_get(inlink->format);
    s->var_values[VAR_W]  = inlink->w;
    s->var_values[VAR_H]  = inlink->h;
    s->var_values[VAR_TB] = av_q2d(inlink->time_base);
    s->var_values[VAR_R]  = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ?
        NAN : av_q2d(inlink->frame_rate);

    if (!sar.num || !sar.den)
        sar.num = sar.den = 1;
    if (sar.num > sar.den) {
        s->xscale = av_q2d(av_div_q(sar, s->aspect));
        s->yscale = 1;
    } else {
        s->yscale = av_q2d(av_div_q(s->aspect, sar));
        s->xscale = 1;
    }
    s->dmax = hypot(inlink->w / 2., inlink->h / 2.);
    av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n",
           s->xscale, s->yscale, s->dmax);

    s->fmap_linesize = FFALIGN(inlink->w, 32);
    s->fmap = av_malloc_array(s->fmap_linesize, inlink->h * sizeof(*s->fmap));
    if (!s->fmap)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT)
        update_context(s, inlink, NULL);

    return 0;
}
static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TransContext *trans = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
    const AVPixFmtDescriptor *desc_in  = av_pix_fmt_desc_get(inlink->format);

    if (trans->dir & 4) {
        av_log(ctx, AV_LOG_WARNING,
               "dir values greater than 3 are deprecated, use the passthrough option instead\n");
        trans->dir &= 3;
        trans->passthrough = TRANSPOSE_PT_TYPE_LANDSCAPE;
    }

    if ((inlink->w >= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
        (inlink->w <= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
        av_log(ctx, AV_LOG_VERBOSE,
               "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
               inlink->w, inlink->h, inlink->w, inlink->h);
        return 0;
    } else {
        trans->passthrough = TRANSPOSE_PT_TYPE_NONE;
    }

    trans->hsub = desc_in->log2_chroma_w;
    trans->vsub = desc_in->log2_chroma_h;

    av_image_fill_max_pixsteps(trans->pixsteps, NULL, desc_out);

    outlink->w = inlink->h;
    outlink->h = inlink->w;

    if (inlink->sample_aspect_ratio.num) {
#ifdef IDE_COMPILE
        AVRational tmp;
        tmp.num = 1;
        tmp.den = 1;
        outlink->sample_aspect_ratio = av_div_q(tmp, inlink->sample_aspect_ratio);
#else
        outlink->sample_aspect_ratio = av_div_q((AVRational) { 1, 1 },
                                                inlink->sample_aspect_ratio);
#endif
    } else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE,
           "w:%d h:%d dir:%d -> w:%d h:%d rotation:%s vflip:%d\n",
           inlink->w, inlink->h, trans->dir, outlink->w, outlink->h,
           trans->dir == 1 || trans->dir == 3 ? "clockwise" : "counterclockwise",
           trans->dir == 0 || trans->dir == 3);
    return 0;
}
static void copy_mp_to_vs_frame_props(struct vf_priv_s *p, VSMap *map,
                                      struct mp_image *img)
{
    struct mp_image_params *params = &img->params;
    if (params->d_w > 0 && params->d_h > 0) {
        AVRational dar = {params->d_w, params->d_h};
        AVRational asp = {params->w, params->h};
        AVRational par = av_div_q(dar, asp);

        p->vsapi->propSetInt(map, "_SARNum", par.num, 0);
        p->vsapi->propSetInt(map, "_SARDen", par.den, 0);
    }
    if (params->colorlevels) {
        p->vsapi->propSetInt(map, "_ColorRange",
                params->colorlevels == MP_CSP_LEVELS_TV, 0);
    }
    // The docs explicitly say it uses libavcodec values.
    p->vsapi->propSetInt(map, "_ColorSpace",
            mp_csp_to_avcol_spc(params->colorspace), 0);
    char pict_type = 0;
    switch (img->pict_type) {
    case 1: pict_type = 'I'; break;
    case 2: pict_type = 'P'; break;
    case 3: pict_type = 'B'; break;
    }
    if (pict_type)
        p->vsapi->propSetData(map, "_PictType", &pict_type, 1, 0);
}
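/*
 * The function above derives the pixel (sample) aspect ratio by dividing the
 * display aspect ratio by the storage aspect ratio, a pattern that recurs in
 * several snippets in this section. A minimal standalone sketch of that
 * arithmetic, not part of the source above; the 720x576 frame with a 16:9
 * DAR is an illustrative PAL-anamorphic example. Link against libavutil.
 */
#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational dar = { 16, 9 };          /* intended display aspect ratio */
    AVRational asp = { 720, 576 };       /* storage aspect ratio (w:h) */
    AVRational par = av_div_q(dar, asp); /* av_div_q reduces the result */

    /* (16*576)/(9*720) = 9216/6480, reduced to the familiar 64:45 */
    printf("PAR = %d:%d\n", par.num, par.den);
    return 0;
}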
static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TransContext *trans = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[outlink->format];

    trans->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
    trans->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;

    av_image_fill_max_pixsteps(trans->pixsteps, NULL, pixdesc);

    outlink->w = inlink->h;
    outlink->h = inlink->w;

    if (inlink->sample_aspect_ratio.num) {
        outlink->sample_aspect_ratio = av_div_q((AVRational){1, 1},
                                                inlink->sample_aspect_ratio);
    } else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    av_log(ctx, AV_LOG_INFO,
           "w:%d h:%d dir:%d -> w:%d h:%d rotation:%s vflip:%d\n",
           inlink->w, inlink->h, trans->dir, outlink->w, outlink->h,
           trans->dir == 1 || trans->dir == 3 ? "clockwise" : "counterclockwise",
           trans->dir == 0 || trans->dir == 3);
    return 0;
}
void Bioscope::seek(qint64 ms)
{
    // ms to time_base units...
    AVRational sec = { (int)ms, 1000 };
    AVRational ts = av_div_q(sec,
            m_detail->formatContext->streams[m_detail->vStreamIndex]->time_base);
    m_detail->last_pts = av_q2d(ts);
    av_seek_frame(m_detail->formatContext, m_detail->vStreamIndex,
                  (qint64)floor(m_detail->last_pts + 0.5), 0);
}
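/*
 * Sketch (not from the source above): the same milliseconds-to-time_base
 * conversion done in integer arithmetic with av_rescale_q, which sidesteps
 * both the (int)ms truncation of the qint64 millisecond count and the double
 * round-trip through av_q2d()/floor(). The 1/90000 time base is an
 * illustrative MPEG-TS value.
 */
#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    int64_t ms = 1500;                   /* seek target in milliseconds */
    AVRational time_base = { 1, 90000 }; /* illustrative stream time base */

    /* rescale 1500 ticks of 1/1000 into 1/90000 ticks: 135000 */
    int64_t ts = av_rescale_q(ms, (AVRational){ 1, 1000 }, time_base);
    printf("%" PRId64 " ms -> %" PRId64 " ticks\n", ms, ts);
    return 0;
}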
static int config_output_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FrameStepContext *framestep = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    outlink->frame_rate =
        av_div_q(inlink->frame_rate, (AVRational){framestep->frame_step, 1});

    av_log(ctx, AV_LOG_VERBOSE, "step:%d frame_rate:%d/%d(%f) -> frame_rate:%d/%d(%f)\n",
           framestep->frame_step,
           inlink->frame_rate.num, inlink->frame_rate.den, av_q2d(inlink->frame_rate),
           outlink->frame_rate.num, outlink->frame_rate.den, av_q2d(outlink->frame_rate));
    return 0;
}
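/*
 * Worked example (not from the source above) of the frame-rate division in
 * config_output_props(): av_div_q() reduces the result, so stepping NTSC
 * 30000/1001 by 2 yields 15000/1001 rather than 30000/2002.
 */
#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational in_rate = { 30000, 1001 }; /* ~29.97 fps */
    int frame_step = 2;
    AVRational out_rate = av_div_q(in_rate, (AVRational){ frame_step, 1 });

    printf("%d/%d -> %d/%d (%f fps)\n",
           in_rate.num, in_rate.den, out_rate.num, out_rate.den,
           av_q2d(out_rate)); /* 30000/1001 -> 15000/1001 (~14.985 fps) */
    return 0;
}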
static int config(struct vf_instance *vf, int w, int h, int dw, int dh,
                  unsigned flags, unsigned fmt)
{
    int ret;
    AVFilterLink *out;
    AVRational iar, dar;

    av_reduce(&iar.num, &iar.den, w, h, INT_MAX);
    av_reduce(&dar.num, &dar.den, dw, dh, INT_MAX);

    vf->priv->in_pixfmt = imgfmt2pixfmt(fmt);
    vf->priv->in_imgfmt = fmt;
    vf->priv->in_w = w;
    vf->priv->in_h = h;
    vf->priv->in_sar = av_div_q(dar, iar);
    ret = avfilter_graph_config(vf->priv->graph, NULL);
    if (ret < 0)
        return 0;
    out = vf->priv->out->inputs[0];
    vf->priv->out_w = out->w;
    vf->priv->out_h = out->h;
    vf->priv->out_pixfmt = out->format;
    vf->priv->out_imgfmt = pixfmt2imgfmt(out->format);
    vf->priv->out_sar = out->sample_aspect_ratio;
    if (vf->priv->out_sar.num != vf->priv->in_sar.num ||
        vf->priv->out_sar.den != vf->priv->in_sar.den ||
        out->w != w || out->h != h)
    {
        av_reduce(&iar.num, &iar.den, out->w, out->h, INT_MAX);
        dar = av_mul_q(iar, out->sample_aspect_ratio);
        if (av_cmp_q(dar, iar) >= 0) {
            dh = out->h;
            dw = av_rescale(dh, dar.num, dar.den);
        } else {
            dw = out->w;
            dh = av_rescale(dw, dar.den, dar.num);
        }
    }
    return vf_next_config(vf, out->w, out->h, dw, dh, flags, fmt);
}
// Pick a "good" timebase, which will be used to convert double timestamps // back to fractions for passing them through libavcodec. AVRational mp_get_codec_timebase(struct mp_codec_params *c) { AVRational tb = {c->native_tb_num, c->native_tb_den}; if (tb.num < 1 || tb.den < 1) { if (c->reliable_fps) tb = av_inv_q(av_d2q(c->fps, 1000000)); if (tb.num < 1 || tb.den < 1) tb = AV_TIME_BASE_Q; } // If the timebase is too coarse, raise its precision, or small adjustments // to timestamps done between decoder and demuxer could be lost. if (av_q2d(tb) > 0.001) { AVRational r = av_div_q(tb, (AVRational){1, 1000}); tb.den *= (r.num + r.den - 1) / r.den; } av_reduce(&tb.num, &tb.den, tb.num, tb.den, INT_MAX); if (tb.num < 1 || tb.den < 1) tb = AV_TIME_BASE_Q; return tb; }
static void copy_mp_to_vs_frame_props_map(struct vf_priv_s *p, VSMap *map,
                                          struct mp_image *img)
{
    struct mp_image_params *params = &img->params;
    if (params->d_w > 0 && params->d_h > 0) {
        AVRational dar = {params->d_w, params->d_h};
        AVRational asp = {params->w, params->h};
        AVRational par = av_div_q(dar, asp);

        p->vsapi->propSetInt(map, "_SARNum", par.num, 0);
        p->vsapi->propSetInt(map, "_SARDen", par.den, 0);
    }
    if (params->colorlevels) {
        p->vsapi->propSetInt(map, "_ColorRange",
                params->colorlevels == MP_CSP_LEVELS_TV, 0);
    }
    // The docs explicitly say it uses libavcodec values.
    p->vsapi->propSetInt(map, "_ColorSpace",
            mp_csp_to_avcol_spc(params->colorspace), 0);
    if (params->chroma_location) {
        p->vsapi->propSetInt(map, "_ChromaLocation",
                params->chroma_location == MP_CHROMA_CENTER, 0);
    }
    char pict_type = 0;
    switch (img->pict_type) {
    case 1: pict_type = 'I'; break;
    case 2: pict_type = 'P'; break;
    case 3: pict_type = 'B'; break;
    }
    if (pict_type)
        p->vsapi->propSetData(map, "_PictType", &pict_type, 1, 0);
    int field = 0;
    if (img->fields & MP_IMGFIELD_INTERLACED)
        field = img->fields & MP_IMGFIELD_TOP_FIRST ? 2 : 1;
    p->vsapi->propSetInt(map, "_FieldBased", field, 0);
}
static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TransContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
    const AVPixFmtDescriptor *desc_in  = av_pix_fmt_desc_get(inlink->format);

    if (s->dir & 4) {
        av_log(ctx, AV_LOG_WARNING,
               "dir values greater than 3 are deprecated, use the passthrough option instead\n");
        s->dir &= 3;
        s->passthrough = TRANSPOSE_PT_TYPE_LANDSCAPE;
    }

    if ((inlink->w >= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
        (inlink->w <= inlink->h && s->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
        av_log(ctx, AV_LOG_VERBOSE,
               "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
               inlink->w, inlink->h, inlink->w, inlink->h);
        return 0;
    } else {
        s->passthrough = TRANSPOSE_PT_TYPE_NONE;
    }

    s->hsub = desc_in->log2_chroma_w;
    s->vsub = desc_in->log2_chroma_h;
    s->planes = av_pix_fmt_count_planes(outlink->format);

    av_assert0(desc_in->nb_components == desc_out->nb_components);

    av_image_fill_max_pixsteps(s->pixsteps, NULL, desc_out);

    outlink->w = inlink->h;
    outlink->h = inlink->w;

    if (inlink->sample_aspect_ratio.num)
        outlink->sample_aspect_ratio = av_div_q((AVRational) { 1, 1 },
                                                inlink->sample_aspect_ratio);
    else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    switch (s->pixsteps[0]) {
    case 1: s->transpose_block = transpose_block_8_c;
            s->transpose_8x8   = transpose_8x8_8_c;  break;
    case 2: s->transpose_block = transpose_block_16_c;
            s->transpose_8x8   = transpose_8x8_16_c; break;
    case 3: s->transpose_block = transpose_block_24_c;
            s->transpose_8x8   = transpose_8x8_24_c; break;
    case 4: s->transpose_block = transpose_block_32_c;
            s->transpose_8x8   = transpose_8x8_32_c; break;
    case 6: s->transpose_block = transpose_block_48_c;
            s->transpose_8x8   = transpose_8x8_48_c; break;
    case 8: s->transpose_block = transpose_block_64_c;
            s->transpose_8x8   = transpose_8x8_64_c; break;
    }

    av_log(ctx, AV_LOG_VERBOSE,
           "w:%d h:%d dir:%d -> w:%d h:%d rotation:%s vflip:%d\n",
           inlink->w, inlink->h, s->dir, outlink->w, outlink->h,
           s->dir == 1 || s->dir == 3 ? "clockwise" : "counterclockwise",
           s->dir == 0 || s->dir == 3);
    return 0;
}
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECREATEINFO cuinfo;

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n",
           format->progressive_sequence);

    ctx->internal_error = 0;

    avctx->width = format->display_area.right;
    avctx->height = format->display_area.bottom;

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    if (!format->progressive_sequence && ctx->deint_mode == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != AV_PIX_FMT_NV12)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    if (format->chroma_format != cudaVideoChromaFormat_420) {
        av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    avctx->coded_width = format->coded_width;
    avctx->coded_height = format->coded_height;

    ctx->chroma_format = format->chroma_format;

    memset(&cuinfo, 0, sizeof(cuinfo));

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;
    cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;

    cuinfo.ulWidth = avctx->coded_width;
    cuinfo.ulHeight = avctx->coded_height;
    cuinfo.ulTargetWidth = cuinfo.ulWidth;
    cuinfo.ulTargetHeight = cuinfo.ulHeight;

    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulWidth;
    cuinfo.target_rect.bottom = cuinfo.ulHeight;

    cuinfo.ulNumDecodeSurfaces = MAX_FRAME_COUNT;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;

    if (format->progressive_sequence) {
        ctx->deint_mode = cuinfo.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave;
    } else {
        cuinfo.DeinterlaceMode = ctx->deint_mode;
    }

    if (ctx->deint_mode != cudaVideoDeinterlaceMode_Weave)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = AV_PIX_FMT_NV12;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}
static int mediacodec_dec_parse_format(AVCodecContext *avctx, MediaCodecDecContext *s)
{
    int ret = 0;
    int width = 0;
    int height = 0;
    char *format = NULL;

    if (!s->format) {
        av_log(avctx, AV_LOG_ERROR, "Output MediaFormat is not set\n");
        return AVERROR(EINVAL);
    }

    format = ff_AMediaFormat_toString(s->format);
    if (!format) {
        return AVERROR_EXTERNAL;
    }
    av_log(avctx, AV_LOG_DEBUG, "Parsing MediaFormat %s\n", format);

    /* Mandatory fields */
    AMEDIAFORMAT_GET_INT32(s->width,  "width",  1);
    AMEDIAFORMAT_GET_INT32(s->height, "height", 1);

    AMEDIAFORMAT_GET_INT32(s->stride, "stride", 1);
    s->stride = s->stride > 0 ? s->stride : s->width;

    AMEDIAFORMAT_GET_INT32(s->slice_height, "slice-height", 1);
    s->slice_height = s->slice_height > 0 ? s->slice_height : s->height;

    if (strstr(s->codec_name, "OMX.Nvidia.")) {
        s->slice_height = FFALIGN(s->height, 16);
    } else if (strstr(s->codec_name, "OMX.SEC.avc.dec")) {
        s->slice_height = avctx->height;
        s->stride = avctx->width;
    }

    AMEDIAFORMAT_GET_INT32(s->color_format, "color-format", 1);
    avctx->pix_fmt = mcdec_map_color_format(avctx, s, s->color_format);
    if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(avctx, AV_LOG_ERROR, "Output color format is not supported\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    /* Optional fields */
    AMEDIAFORMAT_GET_INT32(s->crop_top,    "crop-top",    0);
    AMEDIAFORMAT_GET_INT32(s->crop_bottom, "crop-bottom", 0);
    AMEDIAFORMAT_GET_INT32(s->crop_left,   "crop-left",   0);
    AMEDIAFORMAT_GET_INT32(s->crop_right,  "crop-right",  0);

    width = s->crop_right + 1 - s->crop_left;
    height = s->crop_bottom + 1 - s->crop_top;

    AMEDIAFORMAT_GET_INT32(s->display_width,  "display-width",  0);
    AMEDIAFORMAT_GET_INT32(s->display_height, "display-height", 0);

    if (s->display_width && s->display_height) {
        AVRational sar = av_div_q(
            (AVRational){ s->display_width, s->display_height },
            (AVRational){ width, height });
        ff_set_sar(avctx, sar);
    }

    av_log(avctx, AV_LOG_INFO,
        "Output crop parameters top=%d bottom=%d left=%d right=%d, "
        "resulting dimensions width=%d height=%d\n",
        s->crop_top, s->crop_bottom, s->crop_left, s->crop_right,
        width, height);

    av_freep(&format);
    return ff_set_dimensions(avctx, width, height);
fail:
    av_freep(&format);
    return ret;
}
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECAPS *caps = NULL;
    CUVIDDECODECREATEINFO cuinfo;
    int surface_fmt;

    int old_width = avctx->width;
    int old_height = avctx->height;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NONE,  // Will be updated below
                                       AV_PIX_FMT_NONE };

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n",
           format->progressive_sequence);

    memset(&cuinfo, 0, sizeof(cuinfo));

    ctx->internal_error = 0;

    avctx->coded_width = cuinfo.ulWidth = format->coded_width;
    avctx->coded_height = cuinfo.ulHeight = format->coded_height;

    // apply cropping
    cuinfo.display_area.left = format->display_area.left + ctx->crop.left;
    cuinfo.display_area.top = format->display_area.top + ctx->crop.top;
    cuinfo.display_area.right = format->display_area.right - ctx->crop.right;
    cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;

    // width and height need to be set before calling ff_get_format
    if (ctx->resize_expr) {
        avctx->width = ctx->resize.width;
        avctx->height = ctx->resize.height;
    } else {
        avctx->width = cuinfo.display_area.right - cuinfo.display_area.left;
        avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
    }

    // target width/height need to be multiples of two
    cuinfo.ulTargetWidth = avctx->width = (avctx->width + 1) & ~1;
    cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;

    // aspect ratio conversion, 1:1, depends on scaled resolution
    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulTargetWidth;
    cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;

    switch (format->bit_depth_luma_minus8) {
    case 0: // 8-bit
        pix_fmts[1] = AV_PIX_FMT_NV12;
        caps = &ctx->caps8;
        break;
    case 2: // 10-bit
        pix_fmts[1] = AV_PIX_FMT_P010;
        caps = &ctx->caps10;
        break;
    case 4: // 12-bit
        pix_fmts[1] = AV_PIX_FMT_P016;
        caps = &ctx->caps12;
        break;
    default:
        break;
    }

    if (!caps || !caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "unsupported bit depth: %d\n",
               format->bit_depth_luma_minus8 + 8);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    surface_fmt = ff_get_format(avctx, pix_fmts);
    if (surface_fmt < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", surface_fmt);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Formats: Original: %s | HW: %s | SW: %s\n",
           av_get_pix_fmt_name(avctx->pix_fmt),
           av_get_pix_fmt_name(surface_fmt),
           av_get_pix_fmt_name(avctx->sw_pix_fmt));

    avctx->pix_fmt = surface_fmt;

    // Update our hwframe ctx, as the get_format callback might have refreshed it!
    if (avctx->hw_frames_ctx) {
        av_buffer_unref(&ctx->hwframe);

        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ctx->internal_error = AVERROR(ENOMEM);
            return 0;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    ctx->deint_mode_current = format->progressive_sequence
                              ? cudaVideoDeinterlaceMode_Weave
                              : ctx->deint_mode;

    if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && avctx->width == old_width
            && avctx->height == old_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        av_log(avctx, AV_LOG_DEBUG, "width: %d <-> %d\n", hwframe_ctx->width, avctx->width);
        av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
        av_log(avctx, AV_LOG_DEBUG, "format: %s <-> cuda\n", av_get_pix_fmt_name(hwframe_ctx->format));
        av_log(avctx, AV_LOG_DEBUG, "sw_format: %s <-> %s\n",
               av_get_pix_fmt_name(hwframe_ctx->sw_format), av_get_pix_fmt_name(avctx->sw_pix_fmt));
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    if (format->chroma_format != cudaVideoChromaFormat_420) {
        av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    ctx->chroma_format = format->chroma_format;

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;

    switch (avctx->sw_pix_fmt) {
    case AV_PIX_FMT_NV12:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
        break;
    case AV_PIX_FMT_P010:
    case AV_PIX_FMT_P016:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Output formats other than NV12, P010 or P016 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
    cuinfo.DeinterlaceMode = ctx->deint_mode_current;

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = avctx->sw_pix_fmt;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}