/* Note: The caller must not have run ctx->pix_fmt through avoid_yuv_scale_conversion()
 * before calling this function; the input YUV range is derived from it here.
 *
 * Sets up the software scaler held by *vshp from the decoder context.
 * Returns 0 on success, -1 if the scaler context could not be created. */
int initialize_scaler_handler
(
    lw_video_scaler_handler_t *vshp,
    AVCodecContext            *ctx,
    int                        enabled,
    int                        flags,
    enum AVPixelFormat         output_pixel_format
)
{
    /* Any mode other than fast bilinear gets the high-quality chroma and rounding flags. */
    if( flags != SWS_FAST_BILINEAR )
        flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP | SWS_ACCURATE_RND | SWS_BITEXACT;
    /* Derive the YUV range from the pixel format; a range explicitly
     * signalled by the decoder takes precedence over that guess. */
    int input_yuv_range = avoid_yuv_scale_conversion( &ctx->pix_fmt );
    switch( ctx->color_range )
    {
        case AVCOL_RANGE_JPEG :
            input_yuv_range = 1;    /* full range */
            break;
        case AVCOL_RANGE_MPEG :
            input_yuv_range = 0;    /* limited range */
            break;
        default :
            break;                  /* unspecified: keep the derived value */
    }
    vshp->sws_ctx = update_scaler_configuration( NULL, flags,
                                                 ctx->width, ctx->height,
                                                 ctx->pix_fmt, output_pixel_format,
                                                 ctx->colorspace, input_yuv_range );
    if( !vshp->sws_ctx )
        return -1;
    /* Remember the configuration so later frames can detect when it changes. */
    vshp->enabled             = enabled;
    vshp->flags               = flags;
    vshp->input_width         = ctx->width;
    vshp->input_height        = ctx->height;
    vshp->input_pixel_format  = ctx->pix_fmt;
    vshp->output_pixel_format = output_pixel_format;
    vshp->input_colorspace    = ctx->colorspace;
    vshp->input_yuv_range     = input_yuv_range;
    return 0;
}
/* Produce a VapourSynth frame from a decoded libavcodec frame.
 * Returns a new VSFrameRef on success, or NULL on failure (after setting a
 * filter error on frame_ctx when one is available). */
VSFrameRef *make_frame
(
    lw_video_output_handler_t *vohp,
    AVCodecContext            *ctx,
    AVFrame                   *av_frame
)
{
    vs_video_output_handler_t *vs_vohp = (vs_video_output_handler_t *)vohp->private_handler;
    VSFrameContext *frame_ctx = vs_vohp->frame_ctx;
    VSCore         *core      = vs_vohp->core;
    const VSAPI    *vsapi     = vs_vohp->vsapi;
    if( vs_vohp->direct_rendering && !vohp->scaler.enabled && av_frame->opaque )
    {
        /* Render from the decoder directly: the decoder wrote into a
         * VapourSynth-owned buffer, so just hand out another reference
         * instead of copying. */
        vs_video_buffer_handler_t *vs_vbhp = (vs_video_buffer_handler_t *)av_frame->opaque;
        return vs_vbhp ? (VSFrameRef *)vs_vbhp->vsapi->cloneFrameRef( vs_vbhp->vs_frame_buffer ) : NULL;
    }
    /* Without a frame maker callback there is no way to fill an output frame. */
    if( !vs_vohp->make_frame )
        return NULL;
    /* Convert pixel format if needed. We don't change the presentation resolution.
     * Note: avoid_yuv_scale_conversion() may rewrite av_frame->format in place
     * through this pointer (it also returns the YUV range for the format). */
    enum AVPixelFormat *input_pixel_format = (enum AVPixelFormat *)&av_frame->format;
    int yuv_range = avoid_yuv_scale_conversion( input_pixel_format );
    lw_video_scaler_handler_t *vshp = &vohp->scaler;
    if( !vshp->sws_ctx
     || vshp->input_width        != ctx->width
     || vshp->input_height       != ctx->height
     || vshp->input_pixel_format != *input_pixel_format
     || vshp->input_colorspace   != ctx->colorspace
     || vshp->input_yuv_range    != yuv_range )
    {
        /* Update scaler: any property of the incoming video changed
         * (mid-stream resolution/format switches), so reconfigure. */
        vshp->sws_ctx = update_scaler_configuration( vshp->sws_ctx, vshp->flags,
                                                     ctx->width, ctx->height,
                                                     *input_pixel_format, vshp->output_pixel_format,
                                                     ctx->colorspace, yuv_range );
        if( !vshp->sws_ctx )
        {
            if( frame_ctx )
                vsapi->setFilterError( "lsmas: failed to update scaler settings.", frame_ctx );
            return NULL;
        }
        /* Cache the new configuration for the next change check. */
        vshp->input_width        = ctx->width;
        vshp->input_height       = ctx->height;
        vshp->input_pixel_format = *input_pixel_format;
        vshp->input_colorspace   = ctx->colorspace;
        vshp->input_yuv_range    = yuv_range;
    }
    /* Make video frame. */
    AVPicture av_picture;
    /* NOTE(review): based on the error message and the cleanup below,
     * convert_av_pixel_format() appears to return < 0 on allocation failure and
     * > 0 when it allocated a temporary picture via av_image_alloc (freed at the
     * end); presumably 0 means av_picture aliases av_frame's data — confirm. */
    int ret = convert_av_pixel_format( vshp, ctx->width, ctx->height, av_frame, &av_picture );
    if( ret < 0 )
    {
        if( frame_ctx )
            vsapi->setFilterError( "lsmas: failed to av_image_alloc.", frame_ctx );
        return NULL;
    }
    VSFrameRef *vs_frame = new_output_video_frame( vohp, ctx->width, ctx->height, *input_pixel_format, frame_ctx, core, vsapi );
    if( vs_frame )
        /* Copy/reorder the converted planes into the VapourSynth frame. */
        vs_vohp->make_frame( &av_picture, ctx->width, ctx->height, vs_vohp->component_reorder, vs_frame, frame_ctx, vsapi );
    else if( frame_ctx )
        vsapi->setFilterError( "lsmas: failed to alloc a output video frame.", frame_ctx );
    /* Release the temporary conversion buffer if one was allocated above. */
    if( ret > 0 )
        av_free( av_picture.data[0] );
    return vs_frame;
}