/* Make a VapourSynth video frame from a decoded AVFrame.
 *
 * If the decoder rendered directly into a VapourSynth frame buffer
 * (av_frame->opaque carries the buffer handler set up by the get_buffer2
 * callback), simply return a new reference to that buffer — zero-copy path.
 * Otherwise allocate an output frame and let the make_frame callback fill it,
 * converting the pixel format when required.  The presentation resolution is
 * never changed here.
 *
 * Returns the new frame reference, or NULL on failure; when a frame context
 * is available, a filter error is set on the allocation-failure path. */
VSFrameRef *make_frame( lw_video_output_handler_t *vohp, AVFrame *av_frame )
{
    vs_video_output_handler_t *vs_vohp = (vs_video_output_handler_t *)vohp->private_handler;
    lw_video_scaler_handler_t *vshp    = &vohp->scaler;
    VSFrameContext *frame_ctx = vs_vohp->frame_ctx;
    VSCore         *core      = vs_vohp->core;
    const VSAPI    *vsapi     = vs_vohp->vsapi;
    if( av_frame->opaque )
    {
        /* Render from the decoder directly. */
        vs_video_buffer_handler_t *vs_vbhp = (vs_video_buffer_handler_t *)av_frame->opaque;
        return vs_vbhp ? (VSFrameRef *)vs_vbhp->vsapi->cloneFrameRef( vs_vbhp->vs_frame_buffer ) : NULL;
    }
    if( !vs_vohp->make_frame )
        return NULL;
    /* Make video frame.
     * Convert pixel format if needed. We don't change the presentation resolution. */
    VSFrameRef *vs_frame = new_output_video_frame( vs_vohp, av_frame,
                                                   &vshp->output_pixel_format,
                                                   &vshp->enabled,
                                                   !!(vshp->frame_prop_change_flags & LW_FRAME_PROP_CHANGE_FLAG_PIXEL_FORMAT),
                                                   frame_ctx, core, vsapi );
    if( vs_frame )
        vs_vohp->make_frame( vshp, av_frame, vs_vohp->component_reorder, vs_frame, frame_ctx, vsapi );
    else if( frame_ctx )
        /* Fixed message grammar: "a output" -> "an output". */
        vsapi->setFilterError( "lsmas: failed to allocate an output video frame.", frame_ctx );
    return vs_frame;
}
/* get_buffer2() callback for the decoder.
 *
 * Attempts "direct rendering": the decoder writes straight into a VapourSynth
 * frame buffer so no extra copy is needed at output time.  Falls back to
 * libavcodec's default allocator whenever the negotiated output pixel format
 * differs from the decoded one (a conversion would be needed anyway) or direct
 * rendering is reported unavailable for this format.
 *
 * On success av_frame->opaque points at a heap-allocated
 * vs_video_buffer_handler_t that owns a reference to the VapourSynth frame;
 * it is released via the av_buffer free callback when the last AVBufferRef
 * created by vs_create_plane_buffer() goes away.
 *
 * Returns 0 on success, a negative AVERROR code on failure. */
static int vs_video_get_buffer( AVCodecContext *ctx, AVFrame *av_frame, int flags )
{
    /* Clear opaque first so the caller never sees a stale handler pointer
     * on any fallback/error path. */
    av_frame->opaque = NULL;
    lw_video_output_handler_t *lw_vohp = (lw_video_output_handler_t *)ctx->opaque;
    vs_video_output_handler_t *vs_vohp = (vs_video_output_handler_t *)lw_vohp->private_handler;
    enum AVPixelFormat pix_fmt = av_frame->format;
    avoid_yuv_scale_conversion( &pix_fmt );
    av_frame->format = pix_fmt; /* Don't use AV_PIX_FMT_YUVJ*. */
    /* Direct rendering is only usable when the frame can be emitted as-is:
     * fixed-format output must match the decoded format, and the format must
     * be one the DR path supports. */
    if( (!vs_vohp->variable_info && lw_vohp->scaler.output_pixel_format != pix_fmt)
     || !vs_check_dr_available( ctx, pix_fmt ) )
        return avcodec_default_get_buffer2( ctx, av_frame, flags );
    /* New VapourSynth video frame buffer. */
    vs_video_buffer_handler_t *vs_vbhp = malloc( sizeof(vs_video_buffer_handler_t) );
    if( !vs_vbhp )
    {
        av_frame_unref( av_frame );
        return AVERROR( ENOMEM );
    }
    av_frame->opaque = vs_vbhp;
    /* Let libavcodec pad width/height to its alignment requirements before
     * the VapourSynth frame is sized from them. */
    avcodec_align_dimensions2( ctx, &av_frame->width, &av_frame->height, av_frame->linesize );
    VSFrameRef *vs_frame_buffer = new_output_video_frame( vs_vohp, av_frame, NULL, NULL, 0, vs_vohp->frame_ctx, vs_vohp->core, vs_vohp->vsapi );
    if( !vs_frame_buffer )
    {
        free( vs_vbhp );
        av_frame_unref( av_frame ); /* also resets av_frame->opaque */
        return AVERROR( ENOMEM );
    }
    vs_vbhp->vs_frame_buffer = vs_frame_buffer;
    vs_vbhp->vsapi = vs_vohp->vsapi;
    /* Create frame buffers for the decoder.
     * The callback vs_video_release_buffer_handler() shall be called when no reference to the video buffer handler is present.
     * The callback vs_video_unref_buffer_handler() decrements the reference-counter by 1. */
    memset( av_frame->buf, 0, sizeof(av_frame->buf) );
    memset( av_frame->data, 0, sizeof(av_frame->data) );
    memset( av_frame->linesize, 0, sizeof(av_frame->linesize) );
    /* Bootstrap AVBufferRef (NULL data, size 0) whose only job is to carry
     * the free callback + handler; the per-plane buffers below take their
     * own references to it. */
    AVBufferRef *vs_buffer_handler = av_buffer_create( NULL, 0, vs_video_release_buffer_handler, vs_vbhp, 0 );
    if( !vs_buffer_handler )
    {
        /* No AVBufferRef owns the handler yet, so release it by hand. */
        vs_video_release_buffer_handler( vs_vbhp, NULL );
        av_frame_unref( av_frame );
        return AVERROR( ENOMEM );
    }
    vs_vohp->component_reorder = get_component_reorder( pix_fmt );
    /* NOTE(review): only 3 planes are wired up here — presumably alpha is
     * never taken through the DR path; confirm against vs_check_dr_available(). */
    for( int i = 0; i < 3; i++ )
        if( vs_create_plane_buffer( vs_vbhp, vs_buffer_handler, av_frame, i, vs_vohp->component_reorder[i] ) < 0 )
            goto fail;
    /* Here, a variable 'vs_buffer_handler' itself is not referenced by any pointer. */
    av_buffer_unref( &vs_buffer_handler );
    av_frame->nb_extended_buf = 0;
    av_frame->extended_data = av_frame->data;
    return 0;
fail:
    /* Unref the frame first: dropping its plane buffers releases their
     * references to the handler before we drop our bootstrap reference. */
    av_frame_unref( av_frame );
    av_buffer_unref( &vs_buffer_handler );
    return AVERROR( ENOMEM );
}
/* Make a VapourSynth video frame from a decoded AVFrame (scaler variant).
 *
 * If direct rendering is active (and no scaler is enabled), return a new
 * reference to the VapourSynth buffer the decoder already wrote into.
 * Otherwise reconfigure the swscale context when any input property changed
 * (size, pixel format, colorspace, YUV range), convert the picture, and copy
 * it into a freshly allocated output frame via the make_frame callback.
 * The presentation resolution is never changed here.
 *
 * Returns the new frame reference, or NULL on failure; when a frame context
 * is available, a filter error describes which step failed. */
VSFrameRef *make_frame( lw_video_output_handler_t *vohp, AVCodecContext *ctx, AVFrame *av_frame )
{
    vs_video_output_handler_t *vs_vohp = (vs_video_output_handler_t *)vohp->private_handler;
    VSFrameContext *frame_ctx = vs_vohp->frame_ctx;
    VSCore         *core      = vs_vohp->core;
    const VSAPI    *vsapi     = vs_vohp->vsapi;
    if( vs_vohp->direct_rendering && !vohp->scaler.enabled && av_frame->opaque )
    {
        /* Render from the decoder directly. */
        vs_video_buffer_handler_t *vs_vbhp = (vs_video_buffer_handler_t *)av_frame->opaque;
        return vs_vbhp ? (VSFrameRef *)vs_vbhp->vsapi->cloneFrameRef( vs_vbhp->vs_frame_buffer ) : NULL;
    }
    if( !vs_vohp->make_frame )
        return NULL;
    /* Convert pixel format if needed. We don't change the presentation resolution. */
    enum AVPixelFormat *input_pixel_format = (enum AVPixelFormat *)&av_frame->format;
    int yuv_range = avoid_yuv_scale_conversion( input_pixel_format );
    lw_video_scaler_handler_t *vshp = &vohp->scaler;
    if( !vshp->sws_ctx
     || vshp->input_width        != ctx->width
     || vshp->input_height       != ctx->height
     || vshp->input_pixel_format != *input_pixel_format
     || vshp->input_colorspace   != ctx->colorspace
     || vshp->input_yuv_range    != yuv_range )
    {
        /* Update scaler. */
        vshp->sws_ctx = update_scaler_configuration( vshp->sws_ctx, vshp->flags,
                                                     ctx->width, ctx->height,
                                                     *input_pixel_format, vshp->output_pixel_format,
                                                     ctx->colorspace, yuv_range );
        if( !vshp->sws_ctx )
        {
            if( frame_ctx )
                vsapi->setFilterError( "lsmas: failed to update scaler settings.", frame_ctx );
            return NULL;
        }
        /* Remember the input properties so the context is reused until they change. */
        vshp->input_width        = ctx->width;
        vshp->input_height       = ctx->height;
        vshp->input_pixel_format = *input_pixel_format;
        vshp->input_colorspace   = ctx->colorspace;
        vshp->input_yuv_range    = yuv_range;
    }
    /* Make video frame. */
    AVPicture av_picture;
    /* ret > 0 indicates convert_av_pixel_format() allocated av_picture's
     * storage, which must be freed once the data has been copied out. */
    int ret = convert_av_pixel_format( vshp, ctx->width, ctx->height, av_frame, &av_picture );
    if( ret < 0 )
    {
        if( frame_ctx )
            vsapi->setFilterError( "lsmas: failed to av_image_alloc.", frame_ctx );
        return NULL;
    }
    VSFrameRef *vs_frame = new_output_video_frame( vohp, ctx->width, ctx->height, *input_pixel_format, frame_ctx, core, vsapi );
    if( vs_frame )
        vs_vohp->make_frame( &av_picture, ctx->width, ctx->height, vs_vohp->component_reorder, vs_frame, frame_ctx, vsapi );
    else if( frame_ctx )
        /* Fixed message grammar: "alloc a output" -> "allocate an output". */
        vsapi->setFilterError( "lsmas: failed to allocate an output video frame.", frame_ctx );
    if( ret > 0 )
        av_free( av_picture.data[0] );
    return vs_frame;
}