//*************************************************************
uint8_t ADMVideoLavPPDeint::getFrameNumberNoAlloc(uint32_t frame,
                                                  uint32_t *len,
                                                  ADMImage *data,
                                                  uint32_t *flags)
{
    if (frame >= _info.nb_frames) return 0;
    // read uncompressed frame
    if (!_in->getFrameNumberNoAlloc(frame, len, _uncompressed, flags)) return 0;

    uint8_t *iBuff[3], *oBuff[3];
    int strideTab[3], strideTab2[3];

    oBuff[0] = YPLANE(data);
    oBuff[1] = UPLANE(data);
    oBuff[2] = VPLANE(data);

    iBuff[0] = YPLANE(_uncompressed);
    iBuff[1] = UPLANE(_uncompressed);
    iBuff[2] = VPLANE(_uncompressed);

    strideTab[0] = strideTab2[0] = _info.width;
    strideTab[1] = strideTab2[1] = _info.width >> 1;
    strideTab[2] = strideTab2[2] = _info.width >> 1;

    // map frame flags to libpostproc pict_type: 1 = I, 2 = P, 3 = B
    int type;
    if (_uncompressed->flags & AVI_KEY_FRAME)    type = 1;
    else if (_uncompressed->flags & AVI_B_FRAME) type = 3;
    else                                         type = 2;

    pp_postprocess(iBuff, strideTab,
                   oBuff, strideTab2,
                   _info.width, _info.height,
                   NULL, 0,              // no QP table available
                   ppmode, ppcontext, type);

    data->copyInfo(_uncompressed);
    return 1;
}
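/*
 * The flags-to-pict_type mapping above recurs verbatim in the lavDeint and
 * ADMVideoForcedPP call sites below.  A minimal sketch of the same logic as
 * a shared helper, assuming libpostproc's convention that pict_type is 1 for
 * I-, 2 for P- and 3 for B-frames; aviFlagsToPictType is a hypothetical
 * name, not part of any of the projects quoted here.
 */
static int aviFlagsToPictType(uint32_t flags)
{
    if (flags & AVI_KEY_FRAME) return 1; /* I-frame */
    if (flags & AVI_B_FRAME)   return 3; /* B-frame */
    return 2;                            /* P-frame */
}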
bool CDVDVideoPPFFmpeg::Process(DVDVideoPicture* pPicture)
{
  m_pSource = pPicture;

  if (m_pSource->format != RENDER_FMT_YUV420P)
    return false;

  if (!CheckInit(m_pSource->iWidth, m_pSource->iHeight))
  {
    CLog::Log(LOGERROR, "Initialization of ffmpeg postprocessing failed");
    return false;
  }

  // If no target was set or we are using the internal buffer, make sure it's correctly sized
  if (m_pTarget == &m_FrameBuffer || !m_pTarget)
  {
    if (CheckFrameBuffer(m_pSource))
      m_pTarget = &m_FrameBuffer;
    else
    {
      m_pTarget = NULL;
      return false;
    }
  }

  int pict_type = (m_pSource->qscale_type != DVP_QSCALE_MPEG1) ? PP_PICT_TYPE_QP2 : 0;

  pp_postprocess((const uint8_t**)m_pSource->data, m_pSource->iLineSize,
                 m_pTarget->data, m_pTarget->iLineSize,
                 m_pSource->iWidth, m_pSource->iHeight,
                 m_pSource->qp_table, m_pSource->qstride,
                 m_pMode, m_pContext, pict_type); //m_pSource->iFrameType);

  // Copy frame information over to the target, but make sure it is marked as
  // allocated in case the decoder forgot
  m_pTarget->iFlags = m_pSource->iFlags | DVP_FLAG_ALLOCATED;
  if (m_deinterlace)
    m_pTarget->iFlags &= ~DVP_FLAG_INTERLACED;
  m_pTarget->iFrameType     = m_pSource->iFrameType;
  m_pTarget->iRepeatPicture = m_pSource->iRepeatPicture;
  m_pTarget->iDuration      = m_pSource->iDuration;
  m_pTarget->qp_table       = m_pSource->qp_table;
  m_pTarget->qstride        = m_pSource->qstride;
  m_pTarget->qscale_type    = m_pSource->qscale_type;
  m_pTarget->iDisplayHeight = m_pSource->iDisplayHeight;
  m_pTarget->iDisplayWidth  = m_pSource->iDisplayWidth;
  m_pTarget->pts            = m_pSource->pts;
  m_pTarget->format         = RENDER_FMT_YUV420P;

  return true;
}
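/*
 * Why PP_PICT_TYPE_QP2: by default libpostproc interprets the QP table as
 * MPEG1-style quantizers; OR-ing PP_PICT_TYPE_QP2 into pict_type declares
 * MPEG2-style values instead, which is what the qscale_type test above
 * encodes.  A hedged sketch of the same decision against FFmpeg's old
 * qscale_type constants; ppPictType is a hypothetical helper name.
 */
static int ppPictType(int codedType, int qscaleType)
{
    int type = codedType;            /* 1 = I, 2 = P, 3 = B, 0 if unknown */
    if (qscaleType != FF_QSCALE_TYPE_MPEG1)
        type |= PP_PICT_TYPE_QP2;    /* QP table holds MPEG2-style scales */
    return type;
}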
/*------------------------------------------------------------------*/
uint8_t decoderMpeg::uncompress(uint8_t *in, uint8_t *out, uint32_t len, uint32_t *flag)
{
    if (flag) *flag = 0;
    feedData(len, in);

    const mpeg2_info_t *info;
    uint8_t *t;

    t = (uint8_t *)MPEG2DEC->fbuf[0]->buf[0];
    mpeg2_cleanup(MPEG2DEC);
    info = mpeg2_info(MPEG2DEC);

#ifndef ADM_BIG_ENDIAN_ZZ
    if (_postproc.postProcType && _postproc.postProcStrength)
    {
        // we do postproc !
        oBuff[0] = out;
        oBuff[1] = out + _w * _h;
        oBuff[2] = out + ((_w * _h * 5) >> 2);

        iBuff[0] = t;
        iBuff[1] = t + _w * _h;
        iBuff[2] = t + ((_w * _h * 5) >> 2);

        strideTab[0] = strideTab2[0] = _w;
        strideTab[1] = strideTab2[1] = _w >> 1;
        strideTab[2] = strideTab2[2] = _w >> 1;

        /*
            void pp_postprocess(uint8_t *src[3], int srcStride[3],
                                uint8_t *dst[3], int dstStride[3],
                                int horizontalSize, int verticalSize,
                                QP_STORE_T *QP_store, int QP_stride,
                                pp_mode_t *mode, pp_context_t *ppContext,
                                int pict_type);
        */
        pp_postprocess(iBuff, strideTab,
                       oBuff, strideTab2,
                       _w, _h,
                       MPEG2DEC->decoder.quant, MPEG2DEC->decoder.quant_stride,
                       _postproc.ppMode, _postproc.ppContext,
                       MPEG2DEC->decoder.coding_type);
        printf("Postprocessed\n");
    }
/**
    \fn getNextFrame
    \brief Get a processed frame
*/
bool lavDeint::getNextFrame(uint32_t *fn, ADMImage *image)
{
    // get the output of the previous filter
    if (false == previousFilter->getNextFrame(fn, src))
    {
        ADM_warning("lavDeint : Cannot get frame\n");
        return false;
    }

    const uint8_t *iBuff[3];
    uint8_t *oBuff[3];
    int strideIn[3], strideOut[3];
    uint32_t stride[3];

    image->GetWritePlanes(oBuff);
    src->GetReadPlanes((uint8_t **)iBuff);

    image->GetPitches(stride);
    for (int i = 0; i < 3; i++) strideOut[i] = stride[i];
    src->GetPitches(stride);
    for (int i = 0; i < 3; i++) strideIn[i] = stride[i];

    // map frame flags to libpostproc pict_type: 1 = I, 2 = P, 3 = B
    int type;
    if (src->flags & AVI_KEY_FRAME)    type = 1;
    else if (src->flags & AVI_B_FRAME) type = 3;
    else                               type = 2;

    pp_postprocess(iBuff, strideIn,
                   oBuff, strideOut,
                   info.width, info.height,
                   NULL, 0,              // no QP table available
                   ppmode, ppcontext, type);

    image->copyInfo(src);
    return true;
}
static int pp(VideoFilter *vf, VideoFrame *frame, int field)
{
    (void)field;
    ThisFilter *tf = (ThisFilter *)vf;
    TF_VARS;

    TF_START;

    // Derive the plane sizes from the current frame before computing the
    // plane pointers, so a mid-stream resize cannot leave the pointers
    // built from stale offsets.
    tf->ysize  = frame->width * frame->height;
    tf->csize  = tf->ysize / 4;
    tf->width  = frame->width;
    tf->height = frame->height;

    tf->src[0] = tf->dst[0] = frame->buf;
    tf->src[1] = tf->dst[1] = frame->buf + tf->ysize;
    tf->src[2] = tf->dst[2] = frame->buf + tf->ysize + tf->csize;

    if (frame->qscale_table == NULL)
        frame->qstride = 0;

    tf->srcStride[0] = tf->ysize / tf->height;       // luma stride == width
    tf->srcStride[1] = tf->csize / tf->height * 2;   // chroma stride == width/2
    tf->srcStride[2] = tf->csize / tf->height * 2;
    tf->dstStride[0] = tf->ysize / tf->height;
    tf->dstStride[1] = tf->csize / tf->height * 2;
    tf->dstStride[2] = tf->csize / tf->height * 2;

    // in-place: source and destination planes are the same buffer;
    // note that pp_postprocess()'s last parameter is pict_type
    pp_postprocess((const uint8_t **)tf->src, tf->srcStride,
                   tf->dst, tf->dstStride,
                   frame->width, frame->height,
                   (signed char *)frame->qscale_table, frame->qstride,
                   tf->mode, tf->context, PP_FORMAT_420);

    TF_END(tf, "PostProcess: ");
    return 0;
}
uint8_t decoderFF::uncompress(uint8_t *in, uint8_t *out, uint32_t len, uint32_t *flagz)
{
    int got_picture = 0;
    uint8_t *oBuff[3];
    int strideTab[3];
    int strideTab2[3];
    int ret = 0;

    if (len == 0 && !_allowNull) // null frame, silently skipped
    {
        if (flagz) *flagz = AVI_KEY_FRAME;
        printf("\n ff4: null frame\n");
        return 1;
    }

    ret = avcodec_decode_video(_context, &_frame, &got_picture, in, len);
    if (0 > ret && !_context->hurry_up)
    {
        printf("\n error in FFMP43/mpeg4!\n");
        return 0;
    }

    if (!got_picture && !_context->hurry_up)
    {
        // Some encoders emit a VOP header with the vop flag set to 0.
        // It is meant to mean "frame skipped", but that is rather dubious.
        if (len < 8)
        {
            printf("Probably pseudo black frame...\n");
            return 1;
        }
        // _allowNull means we accept null frames in, and so can potentially
        // have no frame out for a while; in that case silently fill with
        // black and return it as a keyframe
        if (_allowNull)
        {
            if (flagz) *flagz = AVI_KEY_FRAME;
            memset(out, 0, _w * _h);
            memset(out + _w * _h, 128, (_w * _h) >> 1);
            printf("\n ignoring got pict ==0\n");
            return 1;
        }
        printf("\n error in FFMP43/mpeg4!: got picture \n");
        //GUI_Alert("Please retry with misc->Turbo off");
        //return 1;
        return 0;
    }

    if (_context->hurry_up)
    {
        if (flagz) *flagz = frameType();
        return 1;
    }

    // convert from ffmpeg's layout to our format: YV12
    uint8_t **src;
    uint32_t stridex[3];
    uint8_t *inx[3];

    switch (_context->pix_fmt)
    {
        case PIX_FMT_YUV411P:
            stridex[0] = _frame.linesize[0];
            stridex[1] = _frame.linesize[1];
            stridex[2] = _frame.linesize[2];
            inx[0] = _frame.data[0];
            inx[1] = _frame.data[1];
            inx[2] = _frame.data[2];
            COL_411_YV12(inx, stridex, _internalBuffer, _w, _h);
            oBuff[0] = _internalBuffer;
            oBuff[1] = _internalBuffer + _w * _h;
            oBuff[2] = oBuff[1] + ((_w * _h) >> 2);
            src = (uint8_t **)oBuff;
            _frame.linesize[0] = _w;
            _frame.linesize[1] = _w >> 1;
            _frame.linesize[2] = _w >> 1;
            break;
        case PIX_FMT_YUV422P:
            stridex[0] = _frame.linesize[0];
            stridex[1] = _frame.linesize[1];
            stridex[2] = _frame.linesize[2];
            inx[0] = _frame.data[0];
            inx[1] = _frame.data[1];
            inx[2] = _frame.data[2];
            COL_422_YV12(inx, stridex, _internalBuffer, _w, _h);
            oBuff[0] = _internalBuffer;
            oBuff[1] = _internalBuffer + _w * _h;
            oBuff[2] = oBuff[1] + ((_w * _h) >> 2);
            src = (uint8_t **)oBuff;
            _frame.linesize[0] = _w;
            _frame.linesize[1] = _w >> 1;
            _frame.linesize[2] = _w >> 1;
            break;
        default:
            // Default is YV12 or I420; depending on swap u/v we swap or not.
            src = (uint8_t **)_frame.data;
#ifndef ADM_BIG_ENDIAN_ZZ
            if (_postproc.postProcType && _postproc.postProcStrength)
            {
                // we do postproc !
                oBuff[0] = _internalBuffer;
                oBuff[1] = _internalBuffer + _frame.linesize[0] * _h;
                oBuff[2] = oBuff[1] + ((_frame.linesize[1] * _h) >> 1);
                for (uint32_t s = 0; s < 3; s++)
                    strideTab[s] = strideTab2[s] = _frame.linesize[s];

                pp_postprocess(_frame.data, strideTab,
                               oBuff, strideTab2,
                               _w, _h,
                               _frame.qscale_table, _frame.qstride,
                               _postproc.ppMode, _postproc.ppContext,
                               _frame.pict_type);
                src = (uint8_t **)oBuff;
            }
#endif
            break;
    }
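/*
 * Note how the plane offsets above are derived from _frame.linesize rather
 * than from the visible width: when the decoder pads rows, linesize exceeds
 * _w, and offsets computed from _w would land inside the luma plane.  A small
 * sketch of that computation, assuming a planar YV12/I420 layout with the
 * chroma planes stored back to back; yv12PlanePointers is a hypothetical
 * helper name.
 */
static void yv12PlanePointers(uint8_t *base, const int linesize[3], int h,
                              uint8_t *planes[3])
{
    planes[0] = base;                               /* luma                  */
    planes[1] = base + linesize[0] * h;             /* after the luma plane  */
    planes[2] = planes[1] + linesize[1] * (h / 2);  /* after the 1st chroma  */
}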
/**
    \fn process
*/
bool ADM_PP::process(class ADMImage *src, class ADMImage *dest)
{
    int type;
    int ww, hh;
    uint32_t border;

    // return dest->duplicate(src);

    // process only up to the largest width that is a multiple of 8;
    // the remaining right-hand columns are copied over afterwards
    border = w & 7;
    ww = w - border;
    hh = h & (~1);

    ADM_assert(src);
    ADM_assert(dest);
    ADM_assert(ppMode);
    ADM_assert(ppContext);

#warning FIXME should be FF_I_TYPE/B/P
    if (src->flags & AVI_KEY_FRAME)    type = 1;
    else if (src->flags & AVI_B_FRAME) type = 3;
    else                               type = 2;

    ADM_assert(src->_colorspace == ADM_COLOR_YV12);

    // we do postproc !
    uint8_t *oBuff[3];
    const uint8_t *xBuff[3];
    uint8_t *iBuff[3];
    int strideTab[3];
    int strideTab2[3];
    int iStrideTab2[3], iStrideTab[3];

    src->GetReadPlanes(iBuff);
    src->GetPitches(strideTab);
    dest->GetPitches(strideTab2);
    dest->GetWritePlanes(oBuff);

    if (swapuv)
    {
        uint8_t *s = oBuff[1];
        oBuff[1] = oBuff[2];
        oBuff[2] = s;
    }
    for (int i = 0; i < 3; i++)
    {
        iStrideTab[i] = strideTab[i];
        iStrideTab2[i] = strideTab2[i];
        xBuff[i] = iBuff[i];
    }

    pp_postprocess(xBuff, iStrideTab,
                   oBuff, iStrideTab2,
                   ww, hh,
                   (int8_t *)(src->quant), src->_qStride,
                   ppMode, ppContext, type); // picture type

    /* If the width is not a multiple of 8, there is a chroma block that
       would need padding while postprocessing; we process up to the nearest
       multiple of 8 and just copy the luma & chroma columns that were left
       over.  Example: w = 724 gives border = 4, ww = 720; luma columns
       720..723 and the matching 2 chroma columns are copied unprocessed. */
    if (border)
    {
        uint8_t *sp, *dp;
        uint32_t right;

        right = ww;
        // Luma
        dp = oBuff[0] + right;
        sp = (uint8_t *)(xBuff[0] + right);
        for (int y = h; y > 0; y--)
        {
            memcpy(dp, sp, border);
            dp += strideTab2[0];
            sp += strideTab[0];
        }
        // Chroma
        border >>= 1;
        right >>= 1;
        dp = oBuff[1] + right;
        sp = (uint8_t *)(xBuff[1] + right);
        for (int y = h >> 1; y > 0; y--)
        {
            memcpy(dp, sp, border);
            dp += strideTab2[1];
            sp += strideTab[1];
        }
        dp = oBuff[2] + right;
        sp = (uint8_t *)(xBuff[2] + right);
        for (int y = h >> 1; y > 0; y--)
        {
            memcpy(dp, sp, border);
            dp += strideTab2[2];
            sp += strideTab[2];
        }
    }
    return true;
}
uint8_t ADMVideoForcedPP::getFrameNumberNoAlloc(uint32_t frame,
                                                uint32_t *len,
                                                ADMImage *data,
                                                uint32_t *flags)
{
    uint32_t page = _info.width * _info.height;

    *len = (page * 3) >> 1;
    if (frame >= _info.nb_frames) return 0;

    if (!(_postproc.postProcType && _postproc.postProcStrength))
    {
        // postprocessing disabled: pass the frame through untouched
        if (!_in->getFrameNumberNoAlloc(frame, len, data, flags)) return 0;
        return 1;
    }

    // we do postproc !
    uint8_t *iBuff[3], *oBuff[3];
    int strideTab[3], strideTab2[3];

    if (!_in->getFrameNumberNoAlloc(frame, len, _uncompressed, flags)) return 0;

    // note: U and V are swapped identically on input and output,
    // so the net result is unchanged
    oBuff[0] = YPLANE(data);
    oBuff[1] = VPLANE(data);
    oBuff[2] = UPLANE(data);

    iBuff[0] = YPLANE(_uncompressed);
    iBuff[1] = VPLANE(_uncompressed);
    iBuff[2] = UPLANE(_uncompressed);

    strideTab[0] = strideTab2[0] = _info.width;
    strideTab[1] = strideTab2[1] = _info.width >> 1;
    strideTab[2] = strideTab2[2] = _info.width >> 1;

    // map frame flags to libpostproc pict_type: 1 = I, 2 = P, 3 = B
    int type;
    if (_uncompressed->flags & AVI_KEY_FRAME)    type = 1;
    else if (_uncompressed->flags & AVI_B_FRAME) type = 3;
    else                                         type = 2;

    pp_postprocess(iBuff, strideTab,
                   oBuff, strideTab2,
                   _info.width, _info.height,
                   NULL, 0,              // no QP table available
                   _postproc.ppMode, _postproc.ppContext, type);

    data->copyInfo(_uncompressed);
    //printf("Type:%d\n", type);
    return 1;
}
int tc_filter(frame_list_t *ptr_, char *options)
{
    vframe_list_t *ptr = (vframe_list_t *)ptr_;
    static vob_t *vob = NULL;
    int instance = ptr->filter_id;

    //----------------------------------
    //
    // filter init
    //
    //----------------------------------

    if (ptr->tag & TC_AUDIO)
        return 0;

    if (ptr->tag & TC_FRAME_IS_SKIPPED)
        return 0;

    if (ptr->tag & TC_FILTER_INIT)
    {
        char *c;
        int len = 0;

        if ((vob = tc_get_vob()) == NULL) return (-1);

        if (vob->im_v_codec == CODEC_RGB)
        {
            tc_log_error(MOD_NAME, "filter is not capable for RGB-Mode !");
            return (-1);
        }

        if (!options || !(len = strlen(options)))
        {
            tc_log_error(MOD_NAME, "this filter needs options !");
            return (-1);
        }

        if (!no_optstr(options))
            do_optstr(options);

        // if "pre" is found, delete it
        if ((c = pp_lookup(options, "pre")))
        {
            memmove(c, c + 3, &options[len] - c);
            pre[instance] = 1;
        }

        if ((c = pp_lookup(options, "help")))
        {
            memmove(c, c + 4, &options[len] - c);
            optstr_help();
        }

        if (pre[instance])
        {
            width[instance]  = vob->im_v_width;
            height[instance] = vob->im_v_height;
        }
        else
        {
            width[instance]  = vob->ex_v_width;
            height[instance] = vob->ex_v_height;
        }

        //tc_log_msg(MOD_NAME, "after pre (%s)", options);

        mode[instance] = pp_get_mode_by_name_and_quality(options, PP_QUALITY_MAX);
        if (mode[instance] == NULL)
        {
            tc_log_error(MOD_NAME, "internal error (pp_get_mode_by_name_and_quality)");
            return (-1);
        }

        // pick the best available CPU capability flag for the pp context
        if (tc_accel & AC_MMXEXT)
            context[instance] = pp_get_context(width[instance], height[instance], PP_CPU_CAPS_MMX2);
        else if (tc_accel & AC_3DNOW)
            context[instance] = pp_get_context(width[instance], height[instance], PP_CPU_CAPS_3DNOW);
        else if (tc_accel & AC_MMX)
            context[instance] = pp_get_context(width[instance], height[instance], PP_CPU_CAPS_MMX);
        else
            context[instance] = pp_get_context(width[instance], height[instance], 0);

        if (context[instance] == NULL)
        {
            tc_log_error(MOD_NAME, "internal error (pp_get_context) (instance=%d)", instance);
            return (-1);
        }

        // filter init ok.
        if (verbose)
            tc_log_info(MOD_NAME, "%s %s #%d", MOD_VERSION, MOD_CAP, ptr->filter_id);

        return (0);
    }

    //----------------------------------
    //
    // filter configure
    //
    //----------------------------------
    if (ptr->tag & TC_FILTER_GET_CONFIG)
    {
        do_getconfig(options);
        return 0;
    }

    //----------------------------------
    //
    // filter close
    //
    //----------------------------------
    if (ptr->tag & TC_FILTER_CLOSE)
    {
        if (mode[instance])
            pp_free_mode(mode[instance]);
        mode[instance] = NULL;

        if (context[instance])
            pp_free_context(context[instance]);
        context[instance] = NULL;

        return (0);
    }

    //----------------------------------
    //
    // filter frame routine
    //
    //----------------------------------

    // The tag variable indicates whether we are called before or after
    // transcode's internal video/audio frame processing routines, and
    // determines the video/audio context.
    if (((ptr->tag & TC_PRE_M_PROCESS && pre[instance]) ||
         (ptr->tag & TC_POST_M_PROCESS && !pre[instance])) &&
        !(ptr->attributes & TC_FRAME_IS_SKIPPED))
    {
        unsigned char *pp_page[3];
        int ppStride[3];

        pp_page[0] = ptr->video_buf;
        pp_page[1] = pp_page[0] + (width[instance] * height[instance]);
        pp_page[2] = pp_page[1] + (width[instance] * height[instance]) / 4;

        ppStride[0] = width[instance];
        ppStride[1] = ppStride[2] = width[instance] >> 1;

        // in-place postprocessing: source and destination planes are the same
        pp_postprocess((const uint8_t **)pp_page, ppStride,
                       pp_page, ppStride,
                       width[instance], height[instance],
                       NULL, 0,
                       mode[instance], context[instance], 0);
    }
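/*
 * The transcode filter above shows the whole libpostproc lifecycle: build a
 * mode and a context once at init, call pp_postprocess() per frame, free
 * both on close.  A minimal, self-contained sketch of the same lifecycle
 * for a single packed I420 buffer, assuming the public API from
 * <libpostproc/postprocess.h> (pp_mode/pp_context are spelled pp_mode_t /
 * pp_context_t in older releases); "hb:a,vb:a,dr:a" is just one example
 * filter string, and postprocess_i420 is a hypothetical function name.
 */
#include <stdint.h>
#include <libpostproc/postprocess.h>

int postprocess_i420(const uint8_t *in, uint8_t *out, int w, int h)
{
    pp_mode    *mode = pp_get_mode_by_name_and_quality("hb:a,vb:a,dr:a",
                                                       PP_QUALITY_MAX);
    pp_context *ctx  = pp_get_context(w, h, PP_CPU_CAPS_AUTO | PP_FORMAT_420);
    if (!mode || !ctx)
    {
        if (mode) pp_free_mode(mode);
        if (ctx)  pp_free_context(ctx);
        return -1;
    }

    /* packed I420: luma plane, then two quarter-size chroma planes */
    const uint8_t *src[3] = { in,  in  + w * h, in  + w * h + (w * h) / 4 };
    uint8_t       *dst[3] = { out, out + w * h, out + w * h + (w * h) / 4 };
    int srcStride[3] = { w, w / 2, w / 2 };
    int dstStride[3] = { w, w / 2, w / 2 };

    /* no QP table available here, hence NULL/0 and pict_type 0 */
    pp_postprocess(src, srcStride, dst, dstStride, w, h,
                   NULL, 0, mode, ctx, 0);

    pp_free_mode(mode);
    pp_free_context(ctx);
    return 0;
}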
FFMS_Frame *FFMS_VideoSource::OutputFrame(AVFrame *Frame)
{
    SanityCheckFrameForData(Frame);

    if (LastFrameWidth != CodecContext->width ||
        LastFrameHeight != CodecContext->height ||
        LastFramePixelFormat != CodecContext->pix_fmt)
    {
        ReAdjustPP(CodecContext->pix_fmt, CodecContext->width, CodecContext->height);

        if (TargetHeight > 0 && TargetWidth > 0 && !TargetPixelFormats.empty())
        {
            if (!InputFormatOverridden)
            {
                InputFormat = PIX_FMT_NONE;
                InputColorSpace = AVCOL_SPC_UNSPECIFIED;
                InputColorRange = AVCOL_RANGE_UNSPECIFIED;
            }
            ReAdjustOutputFormat();
        }
    }

#ifdef FFMS_USE_POSTPROC
    if (PPMode)
    {
        pp_postprocess(const_cast<const uint8_t **>(Frame->data), Frame->linesize,
                       PPFrame.data, PPFrame.linesize,
                       CodecContext->width, CodecContext->height,
                       Frame->qscale_table, Frame->qstride,
                       PPMode, PPContext,
                       Frame->pict_type | (Frame->qscale_type ? PP_PICT_TYPE_QP2 : 0));
        if (SWS)
        {
            sws_scale(SWS, PPFrame.data, PPFrame.linesize, 0,
                      CodecContext->height, SWSFrame.data, SWSFrame.linesize);
            CopyAVPictureFields(SWSFrame, LocalFrame);
        }
        else
        {
            CopyAVPictureFields(PPFrame, LocalFrame);
        }
    }
    else
    {
        if (SWS)
        {
            sws_scale(SWS, Frame->data, Frame->linesize, 0,
                      CodecContext->height, SWSFrame.data, SWSFrame.linesize);
            CopyAVPictureFields(SWSFrame, LocalFrame);
        }
        else
        {
            // Special case to avoid ugly casts
            for (int i = 0; i < 4; i++)
            {
                LocalFrame.Data[i] = Frame->data[i];
                LocalFrame.Linesize[i] = Frame->linesize[i];
            }
        }
    }
#else // FFMS_USE_POSTPROC
    if (SWS)
    {
        sws_scale(SWS, Frame->data, Frame->linesize, 0,
                  CodecContext->height, SWSFrame.data, SWSFrame.linesize);
        CopyAVPictureFields(SWSFrame, LocalFrame);
    }
    else
    {
        // Special case to avoid ugly casts
        for (int i = 0; i < 4; i++)
        {
            LocalFrame.Data[i] = Frame->data[i];
            LocalFrame.Linesize[i] = Frame->linesize[i];
        }
    }
#endif // FFMS_USE_POSTPROC

    LocalFrame.EncodedWidth = CodecContext->width;
    LocalFrame.EncodedHeight = CodecContext->height;
    LocalFrame.EncodedPixelFormat = CodecContext->pix_fmt;
    LocalFrame.ScaledWidth = TargetWidth;
    LocalFrame.ScaledHeight = TargetHeight;
    LocalFrame.ConvertedPixelFormat = OutputFormat;
    LocalFrame.KeyFrame = Frame->key_frame;
    LocalFrame.PictType = av_get_picture_type_char(Frame->pict_type);
    LocalFrame.RepeatPict = Frame->repeat_pict;
    LocalFrame.InterlacedFrame = Frame->interlaced_frame;
    LocalFrame.TopFieldFirst = Frame->top_field_first;
    LocalFrame.ColorSpace = OutputColorSpace;
    LocalFrame.ColorRange = OutputColorRange;

    LastFrameHeight = CodecContext->height;
    LastFrameWidth = CodecContext->width;
    LastFramePixelFormat = CodecContext->pix_fmt;

    return &LocalFrame;
}