static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_outpic;
    const int v1 = -1;
    const int v2 = 3; /* 2^3 = 8 */
    const unsigned i_visible_lines = p_pic->p[Y_PLANE].i_visible_lines;
    const unsigned i_visible_pitch = p_pic->p[Y_PLANE].i_visible_pitch;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    if (!IS_YUV_420_10BITS(p_pic->format.i_chroma))
        SHARPEN_FRAME(255, uint8_t);
    else
        SHARPEN_FRAME(1023, uint16_t);

    plane_CopyPixels( &p_outpic->p[U_PLANE], &p_pic->p[U_PLANE] );
    plane_CopyPixels( &p_outpic->p[V_PLANE], &p_pic->p[V_PLANE] );

    return CopyInfoAndRelease( p_outpic, p_pic );
}
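/* For orientation (an interpretation of the constants above, not the literal
 * expansion of SHARPEN_FRAME -- compare the explicit per-pixel convolution
 * loop in the final Filter() excerpt of this section): with v1 = -1 for the
 * eight neighbours and a centre weight of 2^v2 = 8, the macro applies the
 * classic sharpening kernel
 *
 *     -1 -1 -1
 *     -1  8 -1
 *     -1 -1 -1
 *
 * to the luma plane, scales the result by the configured strength through a
 * precomputed table, adds it back to the source pixel, and clips to the
 * plane's maximum (255 for 8-bit, 1023 for 10-bit chromas). */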
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;

    vlc_mutex_lock( &p_sys->mask_lock );
    plane_t *p_mask = p_sys->p_mask->p+A_PLANE;
    plane_t *p_apic = p_pic->p+A_PLANE;
    if( p_mask->i_visible_pitch != p_apic->i_visible_pitch ||
        p_mask->i_visible_lines != p_apic->i_visible_lines )
    {
        msg_Err( p_filter, "Mask size (%d x %d) and image size (%d x %d) "
                 "don't match. The mask will not be applied.",
                 p_mask->i_visible_pitch,
                 p_mask->i_visible_lines,
                 p_apic->i_visible_pitch,
                 p_apic->i_visible_lines );
    }
    else
    {
        plane_CopyPixels( p_apic, p_mask );
    }
    vlc_mutex_unlock( &p_sys->mask_lock );

    return p_pic;
}
void picture_CopyPixels( picture_t *p_dst, const picture_t *p_src )
{
    int i;

    for( i = 0; i < p_src->i_planes ; i++ )
        plane_CopyPixels( p_dst->p+i, p_src->p+i );
}
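/* Every snippet in this section ultimately relies on plane_CopyPixels().
 * Below is a minimal sketch of what that helper does, assuming the usual
 * plane_t fields and __MIN from vlc_common.h; the real implementation in
 * src/misc/picture.c can also take a single-memcpy fast path when the two
 * pitches match. The function name here is hypothetical. */
static void plane_CopyPixels_sketch( plane_t *p_dst, const plane_t *p_src )
{
    /* Never read or write past the smaller of the two visible areas. */
    const unsigned i_width  = __MIN( p_dst->i_visible_pitch,
                                     p_src->i_visible_pitch );
    const unsigned i_height = __MIN( p_dst->i_visible_lines,
                                     p_src->i_visible_lines );

    /* Rows may be padded differently in each buffer, so copy line by line,
     * advancing each pointer by its own pitch. */
    for( unsigned y = 0; y < i_height; y++ )
        memcpy( &p_dst->p_pixels[y * p_dst->i_pitch],
                &p_src->p_pixels[y * p_src->i_pitch],
                i_width );
}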
/*****************************************************************************
 * Render: displays previously rendered output
 *****************************************************************************
 * This function sends the currently rendered image to the colour threshold
 * filter, waits until it is displayed and switches the two rendering
 * buffers, preparing the next frame.
 *****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_outpic;
    filter_sys_t *p_sys = p_filter->p_sys;

    vlc_mutex_lock( &p_sys->lock );
    int i_simthres = p_sys->i_simthres;
    int i_satthres = p_sys->i_satthres;
    int i_color = p_sys->i_color;
    vlc_mutex_unlock( &p_sys->lock );

    if( !p_pic ) return NULL;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Copy the Y plane */
    plane_CopyPixels( &p_outpic->p[Y_PLANE], &p_pic->p[Y_PLANE] );

    /*
     * Do the U and V planes
     */
    int refu, refv, reflength;
    GetReference( &refu, &refv, &reflength, i_color );

    for( int y = 0; y < p_pic->p[U_PLANE].i_visible_lines; y++ )
    {
        uint8_t *p_src_u = &p_pic->p[U_PLANE].p_pixels[y * p_pic->p[U_PLANE].i_pitch];
        uint8_t *p_src_v = &p_pic->p[V_PLANE].p_pixels[y * p_pic->p[V_PLANE].i_pitch];
        uint8_t *p_dst_u = &p_outpic->p[U_PLANE].p_pixels[y * p_outpic->p[U_PLANE].i_pitch];
        uint8_t *p_dst_v = &p_outpic->p[V_PLANE].p_pixels[y * p_outpic->p[V_PLANE].i_pitch];

        for( int x = 0; x < p_pic->p[U_PLANE].i_visible_pitch; x++ )
        {
            if( IsSimilar( *p_src_u - 0x80, *p_src_v - 0x80,
                           refu, refv, reflength,
                           i_satthres, i_simthres ) )
            {
                *p_dst_u++ = *p_src_u;
                *p_dst_v++ = *p_src_v;
            }
            else
            {
                *p_dst_u++ = 0x80;
                *p_dst_v++ = 0x80;
            }
            p_src_u++;
            p_src_v++;
        }
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
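/* A note on the 0x80 bias above: U and V samples are stored as unsigned
 * bytes centred on 128 (0x80 means "no colour"). Subtracting 0x80 turns them
 * into signed chroma components -- e.g. a stored U of 0x60 becomes -32 --
 * which IsSimilar() can then compare against the reference vector
 * (refu, refv) using the saturation and similarity thresholds. Pixels that
 * fail the test are written back as 0x80/0x80, i.e. rendered as greyscale. */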
static picture_t *Filter(filter_t *filter, picture_t *src)
{
    filter_sys_t *sys = filter->p_sys;

    picture_t *dst = filter_NewPicture(filter);
    if (!dst) {
        picture_Release(src);
        return NULL;
    }

    vlc_mutex_lock(&sys->lock);
    float strength = VLC_CLIP(sys->strength, STRENGTH_MIN, STRENGTH_MAX);
    int   radius   = VLC_CLIP((sys->radius + 1) & ~1, RADIUS_MIN, RADIUS_MAX);
    vlc_mutex_unlock(&sys->lock);

    const video_format_t *fmt = &filter->fmt_in.video;
    struct vf_priv_s *cfg = &sys->cfg;

    cfg->thresh = (1 << 15) / strength;
    if (cfg->radius != radius) {
        cfg->radius = radius;
        cfg->buf    = aligned_alloc(16,
                                    (((fmt->i_width + 15) & ~15) * (cfg->radius + 1) / 2 + 32) * sizeof(*cfg->buf));
    }

    for (int i = 0; i < dst->i_planes; i++) {
        const plane_t *srcp = &src->p[i];
        plane_t       *dstp = &dst->p[i];

        const vlc_chroma_description_t *chroma = sys->chroma;
        int w = fmt->i_width  * chroma->p[i].w.num / chroma->p[i].w.den;
        int h = fmt->i_height * chroma->p[i].h.num / chroma->p[i].h.den;
        int r = (cfg->radius * chroma->p[i].w.num / chroma->p[i].w.den +
                 cfg->radius * chroma->p[i].h.num / chroma->p[i].h.den) / 2;
        r = VLC_CLIP((r + 1) & ~1, RADIUS_MIN, RADIUS_MAX);

        if (__MIN(w, h) > 2 * r && cfg->buf) {
            filter_plane(cfg, dstp->p_pixels, srcp->p_pixels,
                         w, h, dstp->i_pitch, srcp->i_pitch, r);
        } else {
            plane_CopyPixels(dstp, srcp);
        }
    }

    picture_CopyProperties(dst, src);
    picture_Release(src);
    return dst;
}
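/* A worked example for the per-plane scaling above, assuming a 1920x1080
 * I420 input: the chroma description gives w = h = 1/2 for the U and V
 * planes, so they are filtered at 960x540, and a configured radius of 16
 * becomes r = (8 + 8) / 2 = 8 there (before the final clip). The luma plane
 * keeps w = h = 1/1 and the full radius. Whenever a plane is too small
 * relative to the radius, or the scratch buffer could not be allocated, the
 * loop falls back to a plain plane_CopyPixels(). */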
/*****************************************************************************
 * theora_CopyPicture: copy a picture from theora internal buffers to a
 *                     picture_t structure.
 *****************************************************************************/
static void theora_CopyPicture( picture_t *p_pic,
                                th_ycbcr_buffer ycbcr )
{
    int i_plane, i_planes;
    /* th_img_plane
       int width            The width of this plane.
       int height           The height of this plane.
       int stride           The offset in bytes between successive rows.
       unsigned char *data  A pointer to the beginning of the first row.

       Detailed Description
       A buffer for a single color plane in an uncompressed image. This
       contains the image data in a left-to-right, top-down format. Each row
       of pixels is stored contiguously in memory, but successive rows need
       not be. Use stride to compute the offset of the next row. The encoder
       accepts both positive stride values (top-down in memory) and negative
       (bottom-up in memory). The decoder currently always generates images
       with positive strides.

       typedef th_img_plane th_ycbcr_buffer[3]
    */
    i_planes = __MIN(p_pic->i_planes, 3);
    for( i_plane = 0; i_plane < i_planes; i_plane++ )
    {
        plane_t src;
        src.i_lines = ycbcr[i_plane].height;
        src.p_pixels = ycbcr[i_plane].data;
        src.i_pitch = ycbcr[i_plane].stride;
        src.i_visible_pitch = src.i_pitch;
        src.i_visible_lines = src.i_lines;

        plane_CopyPixels( &p_pic->p[i_plane], &src );
    }
}
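/* A hedged sketch of how this helper is typically driven from the decode
 * path, assuming libtheora's standard API (th_decode_packetin and
 * th_decode_ycbcr_out) and a decoder_sys_t holding the th_dec_ctx. The
 * function name, the p_sys->tcx field and the minimal error handling are
 * illustrative, not copied from the module. */
static picture_t *DecodeToPicture_sketch( decoder_t *p_dec,
                                          ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    th_ycbcr_buffer ycbcr;
    picture_t *p_pic;

    /* Feed the packet to the decoder, then fetch the decoded planes. */
    if( th_decode_packetin( p_sys->tcx, p_oggpacket, NULL ) < 0 )
        return NULL;
    th_decode_ycbcr_out( p_sys->tcx, ycbcr );

    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic )
        return NULL;

    /* Copy each theora plane into the picture_t with the helper above. */
    theora_CopyPicture( p_pic, ycbcr );
    return p_pic;
}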
/*****************************************************************************
 * Render: displays previously rendered output
 *****************************************************************************
 * This function sends the currently rendered image to the filter, waits
 * until it is displayed and switches the two rendering buffers, preparing
 * the next frame.
 *****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_outpic;
    unsigned int w, h;
    uint8_t u,v;
    picture_t *p_converted;
    video_format_t fmt_out;
    memset( &fmt_out, 0, sizeof(video_format_t) );
    fmt_out.p_palette = NULL;

    if( !p_pic ) return NULL;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    if( !p_filter->p_sys->p_image )
        p_filter->p_sys->p_image = image_HandlerCreate( p_filter );

    /* chrominance */
    u = p_filter->p_sys->u;
    v = p_filter->p_sys->v;
    for( int y = 0; y < p_outpic->p[U_PLANE].i_lines; y++ )
    {
        memset( p_outpic->p[U_PLANE].p_pixels+y*p_outpic->p[U_PLANE].i_pitch,
                u, p_outpic->p[U_PLANE].i_pitch );
        memset( p_outpic->p[V_PLANE].p_pixels+y*p_outpic->p[V_PLANE].i_pitch,
                v, p_outpic->p[V_PLANE].i_pitch );
        if( v == 0 && u != 0 )
            u --;
        else if( u == 0xff )
            v --;
        else if( v == 0xff )
            u ++;
        else if( u == 0 )
            v ++;
    }

    /* luminance */
    plane_CopyPixels( &p_outpic->p[Y_PLANE], &p_pic->p[Y_PLANE] );

    /* image visualization */
    fmt_out = p_filter->fmt_out.video;
    fmt_out.i_width = p_filter->fmt_out.video.i_width*p_filter->p_sys->scale/150;
    fmt_out.i_height = p_filter->fmt_out.video.i_height*p_filter->p_sys->scale/150;
    fmt_out.i_visible_width = fmt_out.i_width;
    fmt_out.i_visible_height = fmt_out.i_height;
    p_converted = image_Convert( p_filter->p_sys->p_image, p_pic,
                                 &(p_pic->format), &fmt_out );

    if( p_converted )
    {
#define copyimage( plane, b ) \
        for( int y = 0; y<p_converted->p[plane].i_visible_lines; y++ ) { \
        for( int x = 0; x<p_converted->p[plane].i_visible_pitch; x++ ) { \
            int nx, ny; \
            if( p_filter->p_sys->yinc == 1 ) \
                ny = y; \
            else \
                ny = p_converted->p[plane].i_visible_lines-y; \
            if( p_filter->p_sys->xinc == 1 ) \
                nx = x; \
            else \
                nx = p_converted->p[plane].i_visible_pitch-x; \
            p_outpic->p[plane].p_pixels[(p_filter->p_sys->x*b+nx)+(ny+p_filter->p_sys->y*b)*p_outpic->p[plane].i_pitch ] = p_converted->p[plane].p_pixels[y*p_converted->p[plane].i_pitch+x]; \
        } }
        copyimage( Y_PLANE, 2 );
        copyimage( U_PLANE, 1 );
        copyimage( V_PLANE, 1 );
#undef copyimage

        picture_Release( p_converted );
    }
    else
    {
        msg_Err( p_filter, "Image scaling failed miserably." );
    }

    p_filter->p_sys->x += p_filter->p_sys->xinc;
    p_filter->p_sys->y += p_filter->p_sys->yinc;

    p_filter->p_sys->scale += p_filter->p_sys->scaleinc;
    if( p_filter->p_sys->scale >= 50 ) p_filter->p_sys->scaleinc = -1;
    if( p_filter->p_sys->scale <= 1 ) p_filter->p_sys->scaleinc = 1;

    w = p_filter->fmt_out.video.i_width*p_filter->p_sys->scale/150;
    h = p_filter->fmt_out.video.i_height*p_filter->p_sys->scale/150;
    if( p_filter->p_sys->x*2 + w >= p_filter->fmt_out.video.i_width )
        p_filter->p_sys->xinc = -1;
    if( p_filter->p_sys->x <= 0 )
        p_filter->p_sys->xinc = 1;

    if( p_filter->p_sys->x*2 + w >= p_filter->fmt_out.video.i_width )
        p_filter->p_sys->x = (p_filter->fmt_out.video.i_width-w)/2;
    if( p_filter->p_sys->y*2 + h >= p_filter->fmt_out.video.i_height )
        p_filter->p_sys->y = (p_filter->fmt_out.video.i_height-h)/2;

    if( p_filter->p_sys->y*2 + h >= p_filter->fmt_out.video.i_height )
        p_filter->p_sys->yinc = -1;
    if( p_filter->p_sys->y <= 0 )
        p_filter->p_sys->yinc = 1;

    for( int y = 0; y < 16; y++ )
    {
        if( p_filter->p_sys->v == 0 && p_filter->p_sys->u != 0 )
            p_filter->p_sys->u -= 1;
        else if( p_filter->p_sys->u == 0xff )
            p_filter->p_sys->v -= 1;
        else if( p_filter->p_sys->v == 0xff )
            p_filter->p_sys->u += 1;
        else if( p_filter->p_sys->u == 0 )
            p_filter->p_sys->v += 1;
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
/* See header for function doc. */
void ComposeFrame( filter_t *p_filter,
                   picture_t *p_outpic,
                   picture_t *p_inpic_top, picture_t *p_inpic_bottom,
                   compose_chroma_t i_output_chroma, bool swapped_uv_conversion )
{
    assert( p_outpic != NULL );
    assert( p_inpic_top != NULL );
    assert( p_inpic_bottom != NULL );

    /* Valid 4:2:0 chroma handling modes. */
    assert( i_output_chroma == CC_ALTLINE ||
            i_output_chroma == CC_UPCONVERT ||
            i_output_chroma == CC_SOURCE_TOP ||
            i_output_chroma == CC_SOURCE_BOTTOM ||
            i_output_chroma == CC_MERGE );

    filter_sys_t *p_sys = p_filter->p_sys;

    const bool b_upconvert_chroma = i_output_chroma == CC_UPCONVERT;

    for( int i_plane = 0 ; i_plane < p_inpic_top->i_planes ; i_plane++ )
    {
        bool b_is_chroma_plane = ( i_plane == U_PLANE || i_plane == V_PLANE );

        int i_out_plane;
        if( b_is_chroma_plane  &&  b_upconvert_chroma  &&  swapped_uv_conversion )
        {
            if( i_plane == U_PLANE )
                i_out_plane = V_PLANE;
            else /* V_PLANE */
                i_out_plane = U_PLANE;
        }
        else
        {
            i_out_plane = i_plane;
        }

        /* Copy luma or chroma, alternating between input fields. */
        if( !b_is_chroma_plane  ||  i_output_chroma == CC_ALTLINE )
        {
            /* Do an alternating line copy. This is always done for luma,
               and for 4:2:2 chroma. It can be requested for 4:2:0 chroma
               using CC_ALTLINE (see function doc).

               Note that when we get here, the number of lines matches
               in input and output.
            */
            plane_t dst_top;
            plane_t dst_bottom;
            plane_t src_top;
            plane_t src_bottom;
            FieldFromPlane( &dst_top,    &p_outpic->p[i_out_plane],   0 );
            FieldFromPlane( &dst_bottom, &p_outpic->p[i_out_plane],   1 );
            FieldFromPlane( &src_top,    &p_inpic_top->p[i_plane],    0 );
            FieldFromPlane( &src_bottom, &p_inpic_bottom->p[i_plane], 1 );

            /* Copy each field from the corresponding source. */
            plane_CopyPixels( &dst_top,    &src_top    );
            plane_CopyPixels( &dst_bottom, &src_bottom );
        }
        else /* Input 4:2:0, on a chroma plane, and not in altline mode. */
        {
            if( i_output_chroma == CC_UPCONVERT )
            {
                /* Upconverting copy - use all data from both input fields.

                   This produces an output picture with independent chroma
                   for each field. It can be used for general input when
                   the two input frames are different.

                   The output is 4:2:2, but the input is 4:2:0. Thus the
                   output has twice the lines of the input, and each full
                   chroma plane in the input corresponds to a field chroma
                   plane in the output.
                */
                plane_t dst_top;
                plane_t dst_bottom;
                FieldFromPlane( &dst_top,    &p_outpic->p[i_out_plane], 0 );
                FieldFromPlane( &dst_bottom, &p_outpic->p[i_out_plane], 1 );

                /* Copy each field from the corresponding source. */
                plane_CopyPixels( &dst_top,    &p_inpic_top->p[i_plane]    );
                plane_CopyPixels( &dst_bottom, &p_inpic_bottom->p[i_plane] );
            }
            else if( i_output_chroma == CC_SOURCE_TOP )
            {
                /* Copy chroma of input top field. Ignore chroma of input
                   bottom field. Input and output are both 4:2:0, so we just
                   copy the whole plane. */
                plane_CopyPixels( &p_outpic->p[i_out_plane],
                                  &p_inpic_top->p[i_plane] );
            }
            else if( i_output_chroma == CC_SOURCE_BOTTOM )
            {
                /* Copy chroma of input bottom field. Ignore chroma of input
                   top field. Input and output are both 4:2:0, so we just
                   copy the whole plane. */
                plane_CopyPixels( &p_outpic->p[i_out_plane],
                                  &p_inpic_bottom->p[i_plane] );
            }
            else /* i_output_chroma == CC_MERGE */
            {
                /* Average the chroma of the input fields.
                   Input and output are both 4:2:0. */
                uint8_t *p_in_top, *p_in_bottom, *p_out_end, *p_out;
                p_in_top    = p_inpic_top->p[i_plane].p_pixels;
                p_in_bottom = p_inpic_bottom->p[i_plane].p_pixels;
                p_out = p_outpic->p[i_out_plane].p_pixels;
                p_out_end = p_out + p_outpic->p[i_out_plane].i_pitch
                                  * p_outpic->p[i_out_plane].i_visible_lines;

                int w = FFMIN3( p_inpic_top->p[i_plane].i_visible_pitch,
                                p_inpic_bottom->p[i_plane].i_visible_pitch,
                                p_outpic->p[i_plane].i_visible_pitch );

                for( ; p_out < p_out_end ; )
                {
                    Merge( p_out, p_in_top, p_in_bottom, w );
                    p_out       += p_outpic->p[i_out_plane].i_pitch;
                    p_in_top    += p_inpic_top->p[i_plane].i_pitch;
                    p_in_bottom += p_inpic_bottom->p[i_plane].i_pitch;
                }
                EndMerge();
            }
        }
    }
}
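/* Merge() and EndMerge() are not part of this excerpt; in the deinterlacer
 * they dispatch to an implementation selected at runtime (plain C or SIMD),
 * with EndMerge() restoring FPU/SIMD state where that is needed. A minimal
 * plain-C sketch of the averaging that CC_MERGE relies on might look like
 * this (hypothetical name, 8-bit planes assumed): */
static void MergeLines_sketch( void *_p_dest, const void *_p_src1,
                               const void *_p_src2, size_t i_bytes )
{
    uint8_t *p_dest = _p_dest;
    const uint8_t *p_src1 = _p_src1;
    const uint8_t *p_src2 = _p_src2;

    /* Average the two source rows byte by byte into the destination row. */
    for( ; i_bytes > 0; i_bytes-- )
        *p_dest++ = ( *p_src1++ + *p_src2++ ) >> 1;
}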
/****************************************************************************
 * Filter: the whole thing
 ****************************************************************************
 * This function is called just after the thread is launched.
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    const video_format_t *p_fmti = &p_filter->fmt_in.video;
    const video_format_t *p_fmto = &p_filter->fmt_out.video;
    picture_t *p_pic_dst;

    /* Check if format properties changed */
    if( Init( p_filter ) )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Request output picture */
    p_pic_dst = filter_NewPicture( p_filter );
    if( !p_pic_dst )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* */
    picture_t *p_src = p_pic;
    picture_t *p_dst = p_pic_dst;
    if( p_sys->i_extend_factor != 1 )
    {
        p_src = p_sys->p_src_e;
        p_dst = p_sys->p_dst_e;

        CopyPad( p_src, p_pic );
    }

    if( p_sys->b_copy && p_sys->b_swap_uvi == p_sys->b_swap_uvo )
        picture_CopyPixels( p_dst, p_src );
    else if( p_sys->b_copy )
        SwapUV( p_dst, p_src );
    else
        Convert( p_filter, p_sys->ctx, p_dst, p_src, p_fmti->i_height, 0, 3,
                 p_sys->b_swap_uvi, p_sys->b_swap_uvo );

    if( p_sys->ctxA )
    {
        /* We extract the A plane to rescale it, and then we reinject it. */
        if( p_fmti->i_chroma == VLC_CODEC_RGBA )
            ExtractA( p_sys->p_src_a, p_src,
                      p_fmti->i_width * p_sys->i_extend_factor, p_fmti->i_height );
        else
            plane_CopyPixels( p_sys->p_src_a->p, p_src->p+A_PLANE );

        Convert( p_filter, p_sys->ctxA, p_sys->p_dst_a, p_sys->p_src_a,
                 p_fmti->i_height, 0, 1, false, false );
        if( p_fmto->i_chroma == VLC_CODEC_RGBA )
            InjectA( p_dst, p_sys->p_dst_a,
                     p_fmto->i_width * p_sys->i_extend_factor, p_fmto->i_height );
        else
            plane_CopyPixels( p_dst->p+A_PLANE, p_sys->p_dst_a->p );
    }
    else if( p_sys->b_add_a )
    {
        /* We inject a complete opaque alpha plane */
        if( p_fmto->i_chroma == VLC_CODEC_RGBA )
            FillA( &p_dst->p[0], OFFSET_A );
        else
            FillA( &p_dst->p[A_PLANE], 0 );
    }

    if( p_sys->i_extend_factor != 1 )
    {
        picture_CopyPixels( p_pic_dst, p_dst );
    }

    picture_CopyProperties( p_pic_dst, p_pic );
    picture_Release( p_pic );
    return p_pic_dst;
}
/*****************************************************************************
 * Render: displays previously rendered output
 *****************************************************************************
 * This function sends the currently rendered image to the sharpen filter,
 * waits until it is displayed and switches the two rendering buffers,
 * preparing the next frame.
 *****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_outpic;
    int i, j;
    uint8_t *p_src = NULL;
    uint8_t *p_out = NULL;
    int i_src_pitch;
    int i_out_pitch;
    int pix;
    const int v1 = -1;
    const int v2 = 3; /* 2^3 = 8 */

    if( !p_pic ) return NULL;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* process the Y plane */
    p_src = p_pic->p[Y_PLANE].p_pixels;
    p_out = p_outpic->p[Y_PLANE].p_pixels;
    i_src_pitch = p_pic->p[Y_PLANE].i_pitch;
    i_out_pitch = p_outpic->p[Y_PLANE].i_pitch;

    /* perform convolution only on Y plane. Avoid border line. */
    vlc_mutex_lock( &p_filter->p_sys->lock );
    for( i = 0; i < p_pic->p[Y_PLANE].i_visible_lines; i++ )
    {
        if( (i == 0) || (i == p_pic->p[Y_PLANE].i_visible_lines - 1) )
        {
            for( j = 0; j < p_pic->p[Y_PLANE].i_visible_pitch; j++ )
                p_out[i * i_out_pitch + j] = clip( p_src[i * i_src_pitch + j] );
            continue ;
        }
        for( j = 0; j < p_pic->p[Y_PLANE].i_visible_pitch; j++ )
        {
            if( (j == 0) || (j == p_pic->p[Y_PLANE].i_visible_pitch - 1) )
            {
                p_out[i * i_out_pitch + j] = p_src[i * i_src_pitch + j];
                continue ;
            }

            pix = (p_src[(i - 1) * i_src_pitch + j - 1] * v1) +
                  (p_src[(i - 1) * i_src_pitch + j    ] * v1) +
                  (p_src[(i - 1) * i_src_pitch + j + 1] * v1) +
                  (p_src[(i    ) * i_src_pitch + j - 1] * v1) +
                  (p_src[(i    ) * i_src_pitch + j    ] << v2) +
                  (p_src[(i    ) * i_src_pitch + j + 1] * v1) +
                  (p_src[(i + 1) * i_src_pitch + j - 1] * v1) +
                  (p_src[(i + 1) * i_src_pitch + j    ] * v1) +
                  (p_src[(i + 1) * i_src_pitch + j + 1] * v1);

            pix = pix >= 0 ? clip(pix) : -clip(pix * -1);
            p_out[i * i_out_pitch + j] =
                clip( p_src[i * i_src_pitch + j] +
                      p_filter->p_sys->tab_precalc[pix + 256] );
        }
    }
    vlc_mutex_unlock( &p_filter->p_sys->lock );

    plane_CopyPixels( &p_outpic->p[U_PLANE], &p_pic->p[U_PLANE] );
    plane_CopyPixels( &p_outpic->p[V_PLANE], &p_pic->p[V_PLANE] );

    return CopyInfoAndRelease( p_outpic, p_pic );
}
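/* clip() and tab_precalc are not part of this excerpt. Below is a minimal
 * sketch of what they plausibly look like, assuming clip() clamps to the
 * 8-bit range and the table maps the signed, clipped convolution result
 * (offset by 256) to a strength-scaled correction; the table size and the
 * f_sigma name are assumptions, not copied from the module. */
static inline int clip_sketch( int a )
{
    return (a > 255) ? 255 : (a < 0) ? 0 : a;
}

static void init_precalc_table_sketch( int tab_precalc[512], float f_sigma )
{
    /* Index i corresponds to a convolution value of (i - 256), so that
     * tab_precalc[pix + 256] is the correction added to the source pixel. */
    for( int i = 0; i < 512; i++ )
        tab_precalc[i] = (int)( (i - 256) * f_sigma );
}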