void deletePostProc(ADM_PP *pp)
{
    aprintf("Deleting post proc\n");
    if (pp->ppMode) {
        pp_free_mode(pp->ppMode);
        pp->ppMode = NULL;
    }
    if (pp->ppContext) {
        pp_free_context(pp->ppContext);
        pp->ppContext = NULL;
    }
}
/** \fn cleanup */
bool ADM_PP::cleanup(void)
{
    aprintf("Deleting post proc\n");
    if (ppMode) {
        pp_free_mode(ppMode);
        ppMode = NULL;
    }
    if (ppContext) {
        pp_free_context(ppContext);
        ppContext = NULL;
    }
    return true;
}
void CDVDVideoPPFFmpeg::Dispose()
{
  if (m_pMode)
  {
    pp_free_mode(m_pMode);
    m_pMode = NULL;
  }
  if (m_pContext)
  {
    pp_free_context(m_pContext);
    m_pContext = NULL;
  }

  if (m_FrameBuffer.iFlags & DVP_FLAG_ALLOCATED)
  {
    for (int i = 0; i < 4; i++)
    {
      if (m_FrameBuffer.data[i])
      {
        _aligned_free(m_FrameBuffer.data[i]);
        m_FrameBuffer.data[i] = NULL;
        m_FrameBuffer.iLineSize[i] = 0;
      }
    }
    m_FrameBuffer.iFlags &= ~DVP_FLAG_ALLOCATED;
  }

  m_iInitWidth = 0;
  m_iInitHeight = 0;
}
static void uninit(struct vf_instance_s *vf)
{
    int i;
    for (i = 0; i <= PP_QUALITY_MAX; i++) {
        if (vf->priv->ppMode[i])
            pp_free_mode(vf->priv->ppMode[i]);
    }
    if (vf->priv->context)
        pp_free_context(vf->priv->context);
}
static av_cold void pp_uninit(AVFilterContext *ctx)
{
    int i;
    PPFilterContext *pp = ctx->priv;

    for (i = 0; i <= PP_QUALITY_MAX; i++)
        pp_free_mode(pp->modes[i]);
    if (pp->pp_ctx)
        pp_free_context(pp->pp_ctx);
}
FFPP::~FFPP()
{
    if (PPMode)
        pp_free_mode(PPMode);
    if (PPContext)
        pp_free_context(PPContext);
    if (SWSTo422P)
        sws_freeContext(SWSTo422P);
    if (SWSFrom422P)
        sws_freeContext(SWSFrom422P);
    avpicture_free(&InputPicture);
    avpicture_free(&OutputPicture);
}
void FFMS_VideoSource::ResetPP()
{
#ifdef FFMS_USE_POSTPROC
    if (PPContext)
        pp_free_context(PPContext);
    PPContext = NULL;

    if (PPMode)
        pp_free_mode(PPMode);
    PPMode = NULL;
#endif /* FFMS_USE_POSTPROC */

    OutputFrame(DecodeFrame);
}
//*************************************************************
void ADMVideoLavPPDeint::cleanup(void)
{
    if (ppcontext) {
        pp_free_context(ppcontext);
        ppcontext = NULL;
    }
    if (ppmode) {
        pp_free_mode(ppmode);
        ppmode = NULL;
    }
}
/** \fn cleanup */
bool lavDeint::cleanup(void)
{
    if (ppcontext) {
        pp_free_context(ppcontext);
        ppcontext = NULL;
    }
    if (ppmode) {
        pp_free_mode(ppmode);
        ppmode = NULL;
    }
    return true;
}
/*****************************************************************************
 * ClosePostproc
 *****************************************************************************/
static void ClosePostproc( vlc_object_t *p_this )
{
    filter_t *p_filter = (filter_t *)p_this;
    filter_sys_t *p_sys = p_filter->p_sys;

    /* delete the callback before destroying the mutex */
    var_DelCallback( p_filter, FILTER_PREFIX "q", PPQCallback, NULL );
    var_DelCallback( p_filter, FILTER_PREFIX "name", PPNameCallback, NULL );

    /* Destroy the resources */
    vlc_mutex_destroy( &p_sys->lock );
    pp_free_context( p_sys->pp_context );
    if( p_sys->pp_mode )
        pp_free_mode( p_sys->pp_mode );
    free( p_sys );
}
static int config(struct vf_instance *vf,
                  int width, int height, int d_width, int d_height,
                  unsigned int voflags, unsigned int outfmt)
{
    int flags = PP_CPU_CAPS_AUTO;

    switch (outfmt) {
    case IMGFMT_444P: flags |= PP_FORMAT_444; break;
    case IMGFMT_422P: flags |= PP_FORMAT_422; break;
    case IMGFMT_411P: flags |= PP_FORMAT_411; break;
    default:          flags |= PP_FORMAT_420; break;
    }

    if (vf->priv->context)
        pp_free_context(vf->priv->context);
    vf->priv->context = pp_get_context(width, height, flags);

    return vf_next_config(vf, width, height, d_width, d_height, voflags, outfmt);
}
static void gst_post_proc_dispose (GObject * object)
{
  GstPostProc *postproc = (GstPostProc *) object;

  if (postproc->mode)
    pp_free_mode (postproc->mode);
  if (postproc->context)
    pp_free_context (postproc->context);

  g_free (postproc->cargs);
  postproc->cargs = NULL;
  g_free (postproc->args);
  postproc->args = NULL;

  G_OBJECT_CLASS (parent_class)->dispose (object);
}
FFMS_VideoSource::~FFMS_VideoSource()
{
#ifdef FFMS_USE_POSTPROC
    if (PPMode)
        pp_free_mode(PPMode);
    if (PPContext)
        pp_free_context(PPContext);
    avpicture_free(&PPFrame);
#endif // FFMS_USE_POSTPROC

    if (SWS)
        sws_freeContext(SWS);
    avpicture_free(&SWSFrame);
    av_freep(&DecodeFrame);

    Index.Release();
}
static void change_context (GstPostProc * postproc, gint width, gint height)
{
  guint mmx_flags;
  guint altivec_flags;
  gint ppflags;

  GST_DEBUG_OBJECT (postproc, "change_context, width:%d, height:%d",
      width, height);

  if ((width != postproc->width) && (height != postproc->height)) {
    if (postproc->context)
      pp_free_context (postproc->context);

#ifdef HAVE_ORC
    mmx_flags = orc_target_get_default_flags (orc_target_get_by_name ("mmx"));
    altivec_flags =
        orc_target_get_default_flags (orc_target_get_by_name ("altivec"));
    ppflags = (mmx_flags & ORC_TARGET_MMX_MMX ? PP_CPU_CAPS_MMX : 0)
        | (mmx_flags & ORC_TARGET_MMX_MMXEXT ? PP_CPU_CAPS_MMX2 : 0)
        | (mmx_flags & ORC_TARGET_MMX_3DNOW ? PP_CPU_CAPS_3DNOW : 0)
        | (altivec_flags & ORC_TARGET_ALTIVEC_ALTIVEC ? PP_CPU_CAPS_ALTIVEC : 0);
#else
    mmx_flags = 0;
    altivec_flags = 0;
    ppflags = 0;
#endif

    postproc->context = pp_get_context (width, height, PP_FORMAT_420 | ppflags);
    postproc->width = width;
    postproc->height = height;
    postproc->ystride = ROUND_UP_4 (width);
    postproc->ustride = ROUND_UP_8 (width) / 2;
    postproc->vstride = ROUND_UP_8 (postproc->ystride) / 2;
    postproc->ysize = postproc->ystride * ROUND_UP_2 (height);
    postproc->usize = postproc->ustride * ROUND_UP_2 (height) / 2;
    postproc->vsize = postproc->vstride * ROUND_UP_2 (height) / 2;
    GST_DEBUG_OBJECT (postproc, "new strides are (YUV) : %d %d %d",
        postproc->ystride, postproc->ustride, postproc->vstride);
  }
}
void FFMS_VideoSource::ReAdjustPP(PixelFormat VPixelFormat, int Width, int Height)
{
#ifdef FFMS_USE_POSTPROC
    if (PPContext)
        pp_free_context(PPContext);
    PPContext = NULL;

    if (!PPMode)
        return;

    int Flags = GetPPCPUFlags();

    switch (VPixelFormat) {
        case PIX_FMT_YUV420P:
        case PIX_FMT_YUVJ420P: Flags |= PP_FORMAT_420; break;
        case PIX_FMT_YUV422P:
        case PIX_FMT_YUVJ422P: Flags |= PP_FORMAT_422; break;
        case PIX_FMT_YUV411P:  Flags |= PP_FORMAT_411; break;
        case PIX_FMT_YUV444P:
        case PIX_FMT_YUVJ444P: Flags |= PP_FORMAT_444; break;
        default:
            ResetPP();
            throw FFMS_Exception(FFMS_ERROR_POSTPROCESSING, FFMS_ERROR_UNSUPPORTED,
                "The video does not have a colorspace suitable for postprocessing");
    }

    PPContext = pp_get_context(Width, Height, Flags);

    avpicture_free(&PPFrame);
    avpicture_alloc(&PPFrame, VPixelFormat, Width, Height);
#else
    return;
#endif /* FFMS_USE_POSTPROC */
}
void cleanup(VideoFilter *filter)
{
    pp_free_context(((ThisFilter *)filter)->context);
    pp_free_mode(((ThisFilter *)filter)->mode);
}
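/*
 * Minimal sketch of the full libpostproc lifecycle that the fragments
 * collected here create and tear down: allocate a context and a mode, run
 * pp_postprocess() on a planar 4:2:0 frame, then free the mode and the
 * context. This is not taken from any of the quoted projects; the function
 * name run_default_pp and the bare plane/stride arrays are illustrative
 * assumptions, only the libpostproc calls themselves are real API.
 */
#include <stdint.h>
#include <libpostproc/postprocess.h>

static int run_default_pp(uint8_t *src[3], int src_stride[3],
                          uint8_t *dst[3], int dst_stride[3],
                          int width, int height)
{
    /* PP_CPU_CAPS_AUTO lets libpostproc probe CPU features itself,
     * as in the MPlayer/FFmpeg filters quoted above. */
    pp_context *ctx = pp_get_context(width, height,
                                     PP_FORMAT_420 | PP_CPU_CAPS_AUTO);
    pp_mode *mode = pp_get_mode_by_name_and_quality("default", PP_QUALITY_MAX);

    if (!ctx || !mode) {
        if (mode)
            pp_free_mode(mode);
        if (ctx)
            pp_free_context(ctx);
        return -1;
    }

    /* No per-macroblock quantizer map is supplied (NULL, 0);
     * a pict_type of 0 means "unknown". */
    pp_postprocess((const uint8_t **)src, src_stride, dst, dst_stride,
                   width, height, NULL, 0, mode, ctx, 0);

    pp_free_mode(mode);
    pp_free_context(ctx);
    return 0;
}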
/*****************************************************************************
 * OpenPostproc: probe and open the postproc
 *****************************************************************************/
static int OpenPostproc( vlc_object_t *p_this )
{
    filter_t *p_filter = (filter_t *)p_this;
    filter_sys_t *p_sys;
    vlc_value_t val, val_orig, text;
    unsigned i_cpu = vlc_CPU();
    int i_flags = 0;

    if( p_filter->fmt_in.video.i_chroma != p_filter->fmt_out.video.i_chroma ||
        p_filter->fmt_in.video.i_height != p_filter->fmt_out.video.i_height ||
        p_filter->fmt_in.video.i_width != p_filter->fmt_out.video.i_width )
    {
        msg_Err( p_filter, "Filter input and output formats must be identical" );
        return VLC_EGENERIC;
    }

    /* Set CPU capabilities */
    if( i_cpu & CPU_CAPABILITY_MMX )
        i_flags |= PP_CPU_CAPS_MMX;
    if( i_cpu & CPU_CAPABILITY_MMXEXT )
        i_flags |= PP_CPU_CAPS_MMX2;
    if( i_cpu & CPU_CAPABILITY_3DNOW )
        i_flags |= PP_CPU_CAPS_3DNOW;
    if( i_cpu & CPU_CAPABILITY_ALTIVEC )
        i_flags |= PP_CPU_CAPS_ALTIVEC;

    switch( p_filter->fmt_in.video.i_chroma )
    {
        case VLC_CODEC_I444:
        case VLC_CODEC_J444:
        /* case VLC_CODEC_YUVA:
           FIXME: Should work but alpha plane needs to be copied manually and
                  I'm kind of feeling too lazy to write the code to do that ATM
                  (i_pitch vs i_visible_pitch...). */
            i_flags |= PP_FORMAT_444;
            break;
        case VLC_CODEC_I422:
        case VLC_CODEC_J422:
            i_flags |= PP_FORMAT_422;
            break;
        case VLC_CODEC_I411:
            i_flags |= PP_FORMAT_411;
            break;
        case VLC_CODEC_I420:
        case VLC_CODEC_J420:
        case VLC_CODEC_YV12:
            i_flags |= PP_FORMAT_420;
            break;
        default:
            msg_Err( p_filter, "Unsupported input chroma (%4.4s)",
                     (char*)&p_filter->fmt_in.video.i_chroma );
            return VLC_EGENERIC;
    }

    p_sys = malloc( sizeof( filter_sys_t ) );
    if( !p_sys ) return VLC_ENOMEM;
    p_filter->p_sys = p_sys;

    p_sys->pp_context = pp_get_context( p_filter->fmt_in.video.i_width,
                                        p_filter->fmt_in.video.i_height,
                                        i_flags );
    if( !p_sys->pp_context )
    {
        msg_Err( p_filter, "Error while creating post processing context." );
        free( p_sys );
        return VLC_EGENERIC;
    }

    config_ChainParse( p_filter, FILTER_PREFIX, ppsz_filter_options,
                       p_filter->p_cfg );

    var_Create( p_filter, FILTER_PREFIX "q", VLC_VAR_INTEGER |
                VLC_VAR_HASCHOICE | VLC_VAR_DOINHERIT | VLC_VAR_ISCOMMAND );
    text.psz_string = _("Post processing");
    var_Change( p_filter, FILTER_PREFIX "q", VLC_VAR_SETTEXT, &text, NULL );

    var_Get( p_filter, FILTER_PREFIX "q", &val_orig );
    var_Change( p_filter, FILTER_PREFIX "q", VLC_VAR_DELCHOICE, &val_orig, NULL );

    val.psz_string = var_GetNonEmptyString( p_filter, FILTER_PREFIX "name" );
    if( val_orig.i_int )
    {
        p_sys->pp_mode = pp_get_mode_by_name_and_quality( val.psz_string ?
                                                          val.psz_string :
                                                          "default",
                                                          val_orig.i_int );
        if( !p_sys->pp_mode )
        {
            msg_Err( p_filter, "Error while creating post processing mode." );
            free( val.psz_string );
            pp_free_context( p_sys->pp_context );
            free( p_sys );
            return VLC_EGENERIC;
        }
    }
    else
    {
        p_sys->pp_mode = NULL;
    }
    free( val.psz_string );

    for( val.i_int = 0; val.i_int <= PP_QUALITY_MAX; val.i_int++ )
    {
        switch( val.i_int )
        {
            case 0:
                text.psz_string = _("Disable");
                break;
            case 1:
                text.psz_string = _("Lowest");
                break;
            case PP_QUALITY_MAX:
                text.psz_string = _("Highest");
                break;
            default:
                text.psz_string = NULL;
                break;
        }
        var_Change( p_filter, FILTER_PREFIX "q", VLC_VAR_ADDCHOICE,
                    &val, text.psz_string ? &text : NULL );
    }

    vlc_mutex_init( &p_sys->lock );

    /* Add the callback at the end to prevent crashes */
    var_AddCallback( p_filter, FILTER_PREFIX "q", PPQCallback, NULL );
    var_AddCallback( p_filter, FILTER_PREFIX "name", PPNameCallback, NULL );

    p_filter->pf_video_filter = PostprocPict;

    p_sys->b_had_matrix = true;

    return VLC_SUCCESS;
}
int tc_filter(frame_list_t *ptr_, char *options)
{
    vframe_list_t *ptr = (vframe_list_t *)ptr_;
    static vob_t *vob = NULL;
    int instance = ptr->filter_id;

    //----------------------------------
    //
    // filter init
    //
    //----------------------------------

    if (ptr->tag & TC_AUDIO)
        return 0;

    if (ptr->tag & TC_FRAME_IS_SKIPPED)
        return 0;

    if (ptr->tag & TC_FILTER_INIT) {
        char *c;
        int len = 0;

        if ((vob = tc_get_vob()) == NULL)
            return (-1);

        if (vob->im_v_codec == CODEC_RGB) {
            tc_log_error(MOD_NAME, "filter is not capable for RGB-Mode !");
            return (-1);
        }

        if (!options || !(len = strlen(options))) {
            tc_log_error(MOD_NAME, "this filter needs options !");
            return (-1);
        }

        if (!no_optstr(options)) {
            do_optstr(options);
        }

        // if "pre" is found, delete it
        if ((c = pp_lookup(options, "pre"))) {
            memmove(c, c + 3, &options[len] - c);
            pre[instance] = 1;
        }

        if ((c = pp_lookup(options, "help"))) {
            memmove(c, c + 4, &options[len] - c);
            optstr_help();
        }

        if (pre[instance]) {
            width[instance]  = vob->im_v_width;
            height[instance] = vob->im_v_height;
        } else {
            width[instance]  = vob->ex_v_width;
            height[instance] = vob->ex_v_height;
        }

        //tc_log_msg(MOD_NAME, "after pre (%s)", options);

        mode[instance] = pp_get_mode_by_name_and_quality(options, PP_QUALITY_MAX);

        if (mode[instance] == NULL) {
            tc_log_error(MOD_NAME, "internal error (pp_get_mode_by_name_and_quality)");
            return (-1);
        }

        if (tc_accel & AC_MMXEXT)
            context[instance] = pp_get_context(width[instance], height[instance],
                                               PP_CPU_CAPS_MMX2);
        else if (tc_accel & AC_3DNOW)
            context[instance] = pp_get_context(width[instance], height[instance],
                                               PP_CPU_CAPS_3DNOW);
        else if (tc_accel & AC_MMX)
            context[instance] = pp_get_context(width[instance], height[instance],
                                               PP_CPU_CAPS_MMX);
        else
            context[instance] = pp_get_context(width[instance], height[instance], 0);

        if (context[instance] == NULL) {
            tc_log_error(MOD_NAME, "internal error (pp_get_context) (instance=%d)",
                         instance);
            return (-1);
        }

        // filter init ok.
        if (verbose)
            tc_log_info(MOD_NAME, "%s %s #%d", MOD_VERSION, MOD_CAP, ptr->filter_id);

        return (0);
    }

    //----------------------------------
    //
    // filter configure
    //
    //----------------------------------

    if (ptr->tag & TC_FILTER_GET_CONFIG) {
        do_getconfig(options);
        return 0;
    }

    //----------------------------------
    //
    // filter close
    //
    //----------------------------------

    if (ptr->tag & TC_FILTER_CLOSE) {
        if (mode[instance])
            pp_free_mode(mode[instance]);
        mode[instance] = NULL;

        if (context[instance])
            pp_free_context(context[instance]);
        context[instance] = NULL;

        return (0);
    }

    //----------------------------------
    //
    // filter frame routine
    //
    //----------------------------------

    // tag variable indicates, if we are called before
    // transcodes internal video/audo frame processing routines
    // or after and determines video/audio context

    if (((ptr->tag & TC_PRE_M_PROCESS && pre[instance]) ||
         (ptr->tag & TC_POST_M_PROCESS && !pre[instance])) &&
        !(ptr->attributes & TC_FRAME_IS_SKIPPED)) {
        unsigned char *pp_page[3];
        int ppStride[3];

        pp_page[0] = ptr->video_buf;
        pp_page[1] = pp_page[0] + (width[instance] * height[instance]);
        pp_page[2] = pp_page[1] + (width[instance] * height[instance]) / 4;

        ppStride[0] = width[instance];
        ppStride[1] = ppStride[2] = width[instance] >> 1;

        pp_postprocess((void *)pp_page, ppStride,
                       pp_page, ppStride,
                       width[instance], height[instance],
                       NULL, 0, mode[instance], context[instance], 0);
    }