static int config(struct vf_instance *vf, int width, int height, int d_width, int d_height, unsigned int voflags, unsigned int outfmt){ int flags= PP_CPU_CAPS_AUTO; switch(outfmt){ case IMGFMT_444P: flags|= PP_FORMAT_444; break; case IMGFMT_422P: flags|= PP_FORMAT_422; break; case IMGFMT_411P: flags|= PP_FORMAT_411; break; default: flags|= PP_FORMAT_420; break; } if(vf->priv->context) pp_free_context(vf->priv->context); vf->priv->context= pp_get_context(width, height, flags); return vf_next_config(vf,width,height,d_width,d_height,voflags,outfmt); }
/**
 * Rebuild the libpostproc mode/context for \p pp from its current settings.
 *
 * Composes a libpostproc option string from the postProcType bit flags
 * (bit 0: horizontal deblock, bit 1: vertical deblock, bit 2: dering) plus
 * an optional forced quantizer, then creates a fresh context and mode.
 * If no option is selected, postProcStrength is reset to 0.
 *
 * Fixes vs. original: pp_get_context() result is now asserted (only the
 * mode was checked before), sprintf is bounded via snprintf, and a stray
 * double semicolon is removed.
 */
void updatePostProc(ADM_PP *pp)
{
    char stringMode[60];
    char stringFQ[60];

    stringMode[0] = 0;
    deletePostProc(pp); /* release any previous mode/context first */
    aprintf("updating post proc\n");

    /* Build the option string from the enabled filter bits. */
    if (pp->postProcType & 1) strcat(stringMode, "ha:a:128:7,"); /* horizontal deblock */
    if (pp->postProcType & 2) strcat(stringMode, "va:a:128:7,"); /* vertical deblock */
    if (pp->postProcType & 4) strcat(stringMode, "dr:a,");       /* dering */
    if (pp->forcedQuant)
    {
        snprintf(stringFQ, sizeof(stringFQ), "fq:%d,", pp->forcedQuant); /* forced quantizer */
        strcat(stringMode, stringFQ);
    }

    if (strlen(stringMode)) // something to do ?
    {
        uint32_t ppCaps = 0;
#if (defined( ARCH_X86) || defined(ARCH_X86_64))
#define ADD(x,y) if( CpuCaps::has##x()) ppCaps|=PP_CPU_CAPS_##y;
        ADD(MMX,MMX);
        ADD(3DNOW,3DNOW);
        ADD(MMXEXT,MMX2);
#endif
#ifdef HAVE_ALTIVEC
        ppCaps |= PP_CPU_CAPS_ALTIVEC;
#endif
        pp->ppContext = pp_get_context(pp->w, pp->h, ppCaps);
        pp->ppMode = pp_get_mode_by_name_and_quality(stringMode, pp->postProcStrength);
        ADM_assert(pp->ppContext); /* was previously unchecked */
        ADM_assert(pp->ppMode);
        aprintf("Enabled type:%d strength:%d\n", pp->postProcType, pp->postProcStrength);
    }
    else // if nothing is selected we may as well set back every thing to 0
    {
        pp->postProcStrength = 0;
        aprintf("Disabled\n");
    }
}
/** \fn updatePostProc */ bool ADM_PP::update(void) { char stringMode[60]; stringMode[0]=0; cleanup(); aprintf("updating post proc\n"); if(postProcType&1) strcat(stringMode,"ha:a:128:7,"); if(postProcType&2) strcat(stringMode,"va:a:128:7,"); if(postProcType&4) strcat(stringMode,"dr:a,"); if(forcedQuant) { char stringFQ[60]; sprintf(stringFQ,"fq:%d,",forcedQuant); strcat(stringMode,stringFQ); } if(strlen(stringMode)) // something to do ? { uint32_t ppCaps=0; #ifdef ADM_CPU_X86 #define ADD(x,y) if( CpuCaps::has##x()) ppCaps|=PP_CPU_CAPS_##y; ADD(MMX,MMX); ADD(3DNOW,3DNOW); ADD(MMXEXT,MMX2); #endif #ifdef ADM_CPU_ALTIVEC ppCaps|=PP_CPU_CAPS_ALTIVEC; #endif ppContext=pp_get_context(w, h, ppCaps ); ppMode=pp_get_mode_by_name_and_quality( stringMode, postProcStrength);; ADM_assert(ppMode); aprintf("Enabled type:%d strength:%d\n",postProcType,postProcStrength); } else // if nothing is selected we may as well set back every thing to 0 { postProcStrength=0; aprintf("Disabled\n"); } return false; }
/* Allocate and initialize a postprocessing VideoFilter for YV12 frames.
 *
 * Parses `options` into a libpostproc mode at maximum quality and creates
 * a pp context for the given dimensions.  Returns NULL on unsupported
 * pixel formats, allocation failure, or an unparsable option string.
 *
 * Fix vs. original: when option parsing failed, `filter->context` was
 * leaked — it is now released with pp_free_context() before bailing out.
 */
static VideoFilter *new_filter(VideoFrameType inpixfmt, VideoFrameType outpixfmt,
                               int *width, int *height, char *options, int threads)
{
    (void) threads;
    ThisFilter *filter;

    /* Only planar YV12 in and out is supported. */
    if (inpixfmt != FMT_YV12 || outpixfmt != FMT_YV12)
        return NULL;

    filter = (ThisFilter *) malloc(sizeof(ThisFilter));
    if (filter == NULL)
    {
        fprintf(stderr, "Couldn't allocate memory for filter\n");
        return NULL;
    }

    filter->context = pp_get_context(*width, *height,
                                     PP_CPU_CAPS_MMX | PP_CPU_CAPS_MMX2 | PP_CPU_CAPS_3DNOW);
    if (filter->context == NULL)
    {
        fprintf(stderr, "PostProc: failed to get PP context\n");
        free(filter);
        return NULL;
    }

    printf("Filteroptions: %s\n", options);
    filter->mode = pp_get_mode_by_name_and_quality(options, PP_QUALITY_MAX);
    if (filter->mode == NULL)
    {
        printf("%s", pp_help);
        pp_free_context(filter->context); /* was leaked in the original */
        free(filter);
        return NULL;
    }

    filter->eprint = 0;
    filter->vf.filter = &pp;
    filter->vf.cleanup = &cleanup;
    TF_INIT(filter);
    return (VideoFilter *)filter;
}
// Constructor for the FFPP AvxSynth postprocessing filter.
// Parses the PP option string into a libpostproc mode, optionally sets up
// packed<->planar 4:2:2 conversion for YUY2 input, and creates the pp
// context.  All failures are reported via Env->ThrowError().
FFPP::FFPP(avxsynth::PClip AChild, const char *PP, avxsynth::IScriptEnvironment *Env) : avxsynth::GenericVideoFilter(AChild) {
	if (!strcmp(PP, ""))
		Env->ThrowError("FFPP: PP argument is empty");

	PPContext = NULL;
	PPMode = NULL;
	SWSTo422P = NULL;
	SWSFrom422P = NULL;
	memset(&InputPicture, 0, sizeof(InputPicture));
	memset(&OutputPicture, 0, sizeof(OutputPicture));

	// due to a parsing bug in libpostproc it can read beyond the end of a string
	// adding a ',' prevents the bug from manifesting
	// libav head 2011-08-26
	std::string s = PP;
	s.append(",");
	PPMode = pp_get_mode_by_name_and_quality(s.c_str(), PP_QUALITY_MAX);
	if (!PPMode)
		Env->ThrowError("FFPP: Invalid postprocesing settings");

	int64_t Flags = AvisynthToSWSCPUFlags(Env->GetCPUFlags());

	if (vi.IsYV12()) {
		Flags |= PP_FORMAT_420;
	} else if (vi.IsYUY2()) {
		// Packed YUY2 cannot be postprocessed directly: convert to planar
		// 4:2:2 first and back afterwards, using scratch pictures.
		Flags |= PP_FORMAT_422;
		SWSTo422P = FFGetSwsContext(vi.width, vi.height, PIX_FMT_YUYV422, vi.width, vi.height, PIX_FMT_YUV422P, Flags | SWS_BICUBIC, FFGetSwsAssumedColorSpace(vi.width, vi.height));
		SWSFrom422P = FFGetSwsContext(vi.width, vi.height, PIX_FMT_YUV422P, vi.width, vi.height, PIX_FMT_YUYV422, Flags | SWS_BICUBIC, FFGetSwsAssumedColorSpace(vi.width, vi.height));
		// NOTE(review): the sws contexts and avpicture_alloc results are not
		// checked for failure here — confirm this is acceptable upstream.
		avpicture_alloc(&InputPicture, PIX_FMT_YUV422P, vi.width, vi.height);
		avpicture_alloc(&OutputPicture, PIX_FMT_YUV422P, vi.width, vi.height);
	} else {
		Env->ThrowError("FFPP: Only YV12 and YUY2 video supported");
	}

	/* Flags as passed to pp_get_context will potentially no longer be the same int value,
	 * but it will still have the correct binary representation (which is the important part). */
	PPContext = pp_get_context(vi.width, vi.height, (int)Flags);
	if (!PPContext)
		Env->ThrowError("FFPP: Failed to create context");
}
/* Rebuild the libpostproc context and plane geometry when the frame size
 * changes.
 *
 * Fix vs. original: the guard used '&&', so the context was only rebuilt
 * when BOTH dimensions changed; a change to width OR height alone left a
 * context (and strides/sizes) with stale geometry.  It now uses '||'.
 */
static void
change_context (GstPostProc * postproc, gint width, gint height)
{
  guint mmx_flags;
  guint altivec_flags;
  gint ppflags;

  GST_DEBUG_OBJECT (postproc, "change_context, width:%d, height:%d",
      width, height);

  if ((width != postproc->width) || (height != postproc->height)) {
    if (postproc->context)
      pp_free_context (postproc->context);

#ifdef HAVE_ORC
    /* Translate Orc's detected CPU targets into libpostproc capability bits. */
    mmx_flags = orc_target_get_default_flags (orc_target_get_by_name ("mmx"));
    altivec_flags =
        orc_target_get_default_flags (orc_target_get_by_name ("altivec"));
    ppflags = (mmx_flags & ORC_TARGET_MMX_MMX ? PP_CPU_CAPS_MMX : 0)
        | (mmx_flags & ORC_TARGET_MMX_MMXEXT ? PP_CPU_CAPS_MMX2 : 0)
        | (mmx_flags & ORC_TARGET_MMX_3DNOW ? PP_CPU_CAPS_3DNOW : 0)
        | (altivec_flags & ORC_TARGET_ALTIVEC_ALTIVEC ? PP_CPU_CAPS_ALTIVEC : 0);
#else
    mmx_flags = 0;
    altivec_flags = 0;
    ppflags = 0;
#endif

    postproc->context = pp_get_context (width, height, PP_FORMAT_420 | ppflags);

    postproc->width = width;
    postproc->height = height;
    /* I420 plane layout: luma rows rounded to 4 bytes, chroma to 8/2. */
    postproc->ystride = ROUND_UP_4 (width);
    postproc->ustride = ROUND_UP_8 (width) / 2;
    postproc->vstride = ROUND_UP_8 (postproc->ystride) / 2;
    postproc->ysize = postproc->ystride * ROUND_UP_2 (height);
    postproc->usize = postproc->ustride * ROUND_UP_2 (height) / 2;
    postproc->vsize = postproc->vstride * ROUND_UP_2 (height) / 2;
    GST_DEBUG_OBJECT (postproc, "new strides are (YUV) : %d %d %d",
        postproc->ystride, postproc->ustride, postproc->vstride);
  }
}
/* Link-configuration callback: create the libpostproc context matching the
 * negotiated input format and dimensions.
 * Returns 0 on success, AVERROR(ENOMEM) if the context cannot be created.
 * Any pixel format outside the filter's advertised list is a programming
 * error and trips av_assert0(). */
static int pp_config_props(AVFilterLink *inlink)
{
    PPFilterContext *priv  = inlink->dst->priv;
    int             ppflag = PP_CPU_CAPS_AUTO; /* auto-detect CPU features */

    /* Fold the chroma subsampling of the negotiated format into the flags. */
    switch (inlink->format) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
        ppflag |= PP_FORMAT_420;
        break;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUVJ422P:
        ppflag |= PP_FORMAT_422;
        break;
    case AV_PIX_FMT_YUV411P:
        ppflag |= PP_FORMAT_411;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVJ444P:
        ppflag |= PP_FORMAT_444;
        break;
    default:
        av_assert0(0); /* query_formats guarantees we never get here */
    }

    priv->pp_ctx = pp_get_context(inlink->w, inlink->h, ppflag);
    return priv->pp_ctx ? 0 : AVERROR(ENOMEM);
}
/* Recreate the postprocessing context and scratch frame for a new pixel
 * format / geometry.  No-op (beyond freeing the old context) when no pp
 * mode is configured.  Throws FFMS_Exception on unsupported colorspaces
 * and — new in this revision — when pp_get_context() fails, instead of
 * silently leaving PPContext NULL. */
void FFMS_VideoSource::ReAdjustPP(PixelFormat VPixelFormat, int Width, int Height) {
#ifdef FFMS_USE_POSTPROC
	if (PPContext)
		pp_free_context(PPContext);
	PPContext = NULL;

	if (!PPMode)
		return; /* postprocessing disabled: nothing to rebuild */

	int Flags = GetPPCPUFlags();

	/* Map the decoder's pixel format to libpostproc's subsampling flag. */
	switch (VPixelFormat) {
		case PIX_FMT_YUV420P:
		case PIX_FMT_YUVJ420P:
			Flags |= PP_FORMAT_420; break;
		case PIX_FMT_YUV422P:
		case PIX_FMT_YUVJ422P:
			Flags |= PP_FORMAT_422; break;
		case PIX_FMT_YUV411P:
			Flags |= PP_FORMAT_411; break;
		case PIX_FMT_YUV444P:
		case PIX_FMT_YUVJ444P:
			Flags |= PP_FORMAT_444; break;
		default:
			ResetPP();
			throw FFMS_Exception(FFMS_ERROR_POSTPROCESSING, FFMS_ERROR_UNSUPPORTED,
				"The video does not have a colorspace suitable for postprocessing");
	}

	PPContext = pp_get_context(Width, Height, Flags);
	if (!PPContext) {
		/* Previously unchecked: a NULL context would be used later. */
		ResetPP();
		throw FFMS_Exception(FFMS_ERROR_POSTPROCESSING, FFMS_ERROR_ALLOCATION_FAILED,
			"Failed to create postprocessing context");
	}

	avpicture_free(&PPFrame);
	avpicture_alloc(&PPFrame, VPixelFormat, Width, Height);
#else
	return;
#endif /* FFMS_USE_POSTPROC */
}
bool CDVDVideoPPFFmpeg::CheckInit(int iWidth, int iHeight) { if(m_iInitWidth != iWidth || m_iInitHeight != iHeight) { if(m_pContext || m_pMode) { Dispose(); } m_pContext = pp_get_context(m_pSource->iWidth, m_pSource->iHeight, PPCPUFlags() | PP_FORMAT_420); m_iInitWidth = m_pSource->iWidth; m_iInitHeight = m_pSource->iHeight; m_pMode = pp_get_mode_by_name_and_quality((char *)m_sType.c_str(), PP_QUALITY_MAX); } if(m_pMode) return true; else return false; }
//************************************************************* void ADMVideoLavPPDeint::setup(void) { char string[1024]; uint32_t ppCaps=0; string[0]=0; cleanup(); #ifdef ADM_CPU_X86 #define ADD(x,y) if( CpuCaps::has##x()) ppCaps|=PP_CPU_CAPS_##y; ADD(MMX,MMX); ADD(3DNOW,3DNOW); ADD(MMXEXT,MMX2); #endif cleanup(); #undef ADD #define ADD(z) { if(string[0]) strcat(string,","#z); else strcpy(string,#z);} if(_param->autolevel) ADD(al); switch(_param->deintType) { case PP_BM_NONE:break; case PP_BM_LINEAR_BLEND: ADD(lb);break; case PP_BM_LINEAR_INTER: ADD(li);break; case PP_BM_CUBIC_INTER: ADD(ci);break; case PP_BM_MEDIAN_INTER: ADD(md);break; case PP_BM_FFMPEG_DEINT: ADD(fd);break; } ppcontext=pp_get_context(_info.width, _info.height, ppCaps); ppmode=pp_get_mode_by_name_and_quality(string,1);; ADM_assert(ppcontext); ADM_assert(ppmode); }
/*****************************************************************************
 * OpenPostproc: probe and open the postproc
 *****************************************************************************/
static int OpenPostproc( vlc_object_t *p_this )
{
    filter_t *p_filter = (filter_t *)p_this;
    filter_sys_t *p_sys;
    vlc_value_t val, val_orig, text;
    unsigned i_cpu = vlc_CPU();
    int i_flags = 0;

    /* Postproc works in place: input and output formats must match exactly. */
    if( p_filter->fmt_in.video.i_chroma != p_filter->fmt_out.video.i_chroma ||
        p_filter->fmt_in.video.i_height != p_filter->fmt_out.video.i_height ||
        p_filter->fmt_in.video.i_width != p_filter->fmt_out.video.i_width )
    {
        msg_Err( p_filter, "Filter input and output formats must be identical" );
        return VLC_EGENERIC;
    }

    /* Set CPU capabilities */
    if( i_cpu & CPU_CAPABILITY_MMX )
        i_flags |= PP_CPU_CAPS_MMX;
    if( i_cpu & CPU_CAPABILITY_MMXEXT )
        i_flags |= PP_CPU_CAPS_MMX2;
    if( i_cpu & CPU_CAPABILITY_3DNOW )
        i_flags |= PP_CPU_CAPS_3DNOW;
    if( i_cpu & CPU_CAPABILITY_ALTIVEC )
        i_flags |= PP_CPU_CAPS_ALTIVEC;

    /* Map the input chroma onto libpostproc's subsampling flag. */
    switch( p_filter->fmt_in.video.i_chroma )
    {
        case VLC_CODEC_I444:
        case VLC_CODEC_J444:
        /* case VLC_CODEC_YUVA:
           FIXME: Should work but alpha plane needs to be copied manually and
                  I'm kind of feeling too lazy to write the code to do that ATM
                  (i_pitch vs i_visible_pitch...). */
            i_flags |= PP_FORMAT_444;
            break;
        case VLC_CODEC_I422:
        case VLC_CODEC_J422:
            i_flags |= PP_FORMAT_422;
            break;
        case VLC_CODEC_I411:
            i_flags |= PP_FORMAT_411;
            break;
        case VLC_CODEC_I420:
        case VLC_CODEC_J420:
        case VLC_CODEC_YV12:
            i_flags |= PP_FORMAT_420;
            break;
        default:
            msg_Err( p_filter, "Unsupported input chroma (%4.4s)",
                     (char*)&p_filter->fmt_in.video.i_chroma );
            return VLC_EGENERIC;
    }

    p_sys = malloc( sizeof( filter_sys_t ) );
    if( !p_sys )
        return VLC_ENOMEM;
    p_filter->p_sys = p_sys;

    p_sys->pp_context = pp_get_context( p_filter->fmt_in.video.i_width,
                                        p_filter->fmt_in.video.i_height,
                                        i_flags );
    if( !p_sys->pp_context )
    {
        msg_Err( p_filter, "Error while creating post processing context." );
        free( p_sys );
        return VLC_EGENERIC;
    }

    config_ChainParse( p_filter, FILTER_PREFIX, ppsz_filter_options,
                       p_filter->p_cfg );

    /* Expose the quality as an integer command variable with text choices. */
    var_Create( p_filter, FILTER_PREFIX "q", VLC_VAR_INTEGER |
                VLC_VAR_HASCHOICE | VLC_VAR_DOINHERIT | VLC_VAR_ISCOMMAND );
    text.psz_string = _("Post processing");
    var_Change( p_filter, FILTER_PREFIX "q", VLC_VAR_SETTEXT, &text, NULL );

    /* Remember the inherited quality, then drop it from the choice list
     * (it will be re-added in the loop below). */
    var_Get( p_filter, FILTER_PREFIX "q", &val_orig );
    var_Change( p_filter, FILTER_PREFIX "q", VLC_VAR_DELCHOICE, &val_orig, NULL );

    val.psz_string = var_GetNonEmptyString( p_filter, FILTER_PREFIX "name" );
    if( val_orig.i_int )
    {
        /* Quality > 0: build the pp mode from the configured name (or the
         * "default" preset) at the inherited quality level. */
        p_sys->pp_mode = pp_get_mode_by_name_and_quality( val.psz_string ?
                                                          val.psz_string :
                                                          "default",
                                                          val_orig.i_int );
        if( !p_sys->pp_mode )
        {
            msg_Err( p_filter, "Error while creating post processing mode." );
            free( val.psz_string );
            pp_free_context( p_sys->pp_context );
            free( p_sys );
            return VLC_EGENERIC;
        }
    }
    else
    {
        /* Quality 0: postprocessing disabled until changed via callback. */
        p_sys->pp_mode = NULL;
    }
    free( val.psz_string );

    /* Populate the quality choice list 0..PP_QUALITY_MAX with labels for
     * the notable values. */
    for( val.i_int = 0; val.i_int <= PP_QUALITY_MAX; val.i_int++ )
    {
        switch( val.i_int )
        {
            case 0:
                text.psz_string = _("Disable");
                break;
            case 1:
                text.psz_string = _("Lowest");
                break;
            case PP_QUALITY_MAX:
                text.psz_string = _("Highest");
                break;
            default:
                text.psz_string = NULL;
                break;
        }
        var_Change( p_filter, FILTER_PREFIX "q", VLC_VAR_ADDCHOICE,
                    &val, text.psz_string?&text:NULL );
    }

    vlc_mutex_init( &p_sys->lock );

    /* Add the callback at the end to prevent crashes */
    var_AddCallback( p_filter, FILTER_PREFIX "q", PPQCallback, NULL );
    var_AddCallback( p_filter, FILTER_PREFIX "name", PPNameCallback, NULL );

    p_filter->pf_video_filter = PostprocPict;
    p_sys->b_had_matrix = true;

    return VLC_SUCCESS;
}
int tc_filter(frame_list_t *ptr_, char *options) { vframe_list_t *ptr = (vframe_list_t *)ptr_; static vob_t *vob=NULL; int instance = ptr->filter_id; //---------------------------------- // // filter init // //---------------------------------- if( (ptr->tag & TC_AUDIO)) return 0; if(ptr->tag & TC_FRAME_IS_SKIPPED) return 0; if(ptr->tag & TC_FILTER_INIT) { char *c; int len=0; if((vob = tc_get_vob())==NULL) return(-1); if (vob->im_v_codec == CODEC_RGB) { tc_log_error(MOD_NAME, "filter is not capable for RGB-Mode !"); return(-1); } if (!options || !(len=strlen(options))) { tc_log_error(MOD_NAME, "this filter needs options !"); return(-1); } if (!no_optstr(options)) { do_optstr(options); } // if "pre" is found, delete it if ( (c=pp_lookup(options, "pre")) ) { memmove (c, c+3, &options[len]-c); pre[instance] = 1; } if ( (c=pp_lookup(options, "help")) ) { memmove (c, c+4, &options[len]-c); optstr_help(); } if (pre[instance]) { width[instance] = vob->im_v_width; height[instance]= vob->im_v_height; } else { width[instance] = vob->ex_v_width; height[instance]= vob->ex_v_height; } //tc_log_msg(MOD_NAME, "after pre (%s)", options); mode[instance] = pp_get_mode_by_name_and_quality(options, PP_QUALITY_MAX); if(mode[instance]==NULL) { tc_log_error(MOD_NAME, "internal error (pp_get_mode_by_name_and_quality)"); return(-1); } if(tc_accel & AC_MMXEXT) context[instance] = pp_get_context(width[instance], height[instance], PP_CPU_CAPS_MMX2); else if(tc_accel & AC_3DNOW) context[instance] = pp_get_context(width[instance], height[instance], PP_CPU_CAPS_3DNOW); else if(tc_accel & AC_MMX) context[instance] = pp_get_context(width[instance], height[instance], PP_CPU_CAPS_MMX); else context[instance] = pp_get_context(width[instance], height[instance], 0); if(context[instance]==NULL) { tc_log_error(MOD_NAME, "internal error (pp_get_context) (instance=%d)", instance); return(-1); } // filter init ok. 
if(verbose) tc_log_info(MOD_NAME, "%s %s #%d", MOD_VERSION, MOD_CAP, ptr->filter_id); return(0); } //---------------------------------- // // filter configure // //---------------------------------- if(ptr->tag & TC_FILTER_GET_CONFIG) { do_getconfig (options); return 0; } //---------------------------------- // // filter close // //---------------------------------- if(ptr->tag & TC_FILTER_CLOSE) { if (mode[instance]) pp_free_mode(mode[instance]); mode[instance] = NULL; if (context[instance]) pp_free_context(context[instance]); context[instance] = NULL; return(0); } //---------------------------------- // // filter frame routine // //---------------------------------- // tag variable indicates, if we are called before // transcodes internal video/audo frame processing routines // or after and determines video/audio context if(((ptr->tag & TC_PRE_M_PROCESS && pre[instance]) || (ptr->tag & TC_POST_M_PROCESS && !pre[instance])) && !(ptr->attributes & TC_FRAME_IS_SKIPPED)) { unsigned char *pp_page[3]; int ppStride[3]; pp_page[0] = ptr->video_buf; pp_page[1] = pp_page[0] + (width[instance] * height[instance]); pp_page[2] = pp_page[1] + (width[instance] * height[instance])/4; ppStride[0] = width[instance]; ppStride[1] = ppStride[2] = width[instance]>>1; pp_postprocess((void *)pp_page, ppStride, pp_page, ppStride, width[instance], height[instance], NULL, 0, mode[instance], context[instance], 0); }