/* Half-rate VDPAU deinterlacing: splits one full-frame picture into two
 * field pictures.  `src` keeps one field and a newly allocated `dst` gets
 * the other; the two are chained through src->p_next.  Only the picture
 * metadata (field structure, PTS) is touched here.
 * Returns `src` (possibly with `dst` chained); on any failure it falls back
 * to returning `src` untouched. */
static picture_t *Deinterlace(filter_t *filter, picture_t *src)
{
    filter_sys_t *sys = filter->p_sys;
    mtime_t last_pts = sys->last_pts;

    sys->last_pts = src->date;

    vlc_vdp_video_field_t *f1 = src->context;
    if (unlikely(f1 == NULL))
        return src;
    if (f1->structure != VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME)
        return src; /* cannot deinterlace twice */

#ifdef VOUT_CORE_GETS_A_CLUE
    picture_t *dst = filter_NewPicture(filter);
#else
    picture_t *dst = picture_NewFromFormat(&src->format);
#endif
    if (dst == NULL)
        return src; /* cannot deinterlace without copying fields */

    vlc_vdp_video_field_t *f2 = vlc_vdp_video_copy(f1); // shallow copy
    if (unlikely(f2 == NULL))
    {
        picture_Release(dst);
        return src;
    }

    picture_CopyProperties(dst, src);
    dst->context = f2;

    /* Timestamp for the second field: halfway between this frame and the
     * (extrapolated) next one when a previous PTS is known, otherwise half
     * a nominal frame period past the current PTS. */
    if (last_pts != VLC_TS_INVALID)
        dst->date = (3 * src->date - last_pts) / 2;
    else if (filter->fmt_in.video.i_frame_rate != 0)
        dst->date = src->date + ((filter->fmt_in.video.i_frame_rate_base
                             * CLOCK_FREQ) / filter->fmt_in.video.i_frame_rate);
    dst->b_top_field_first = !src->b_top_field_first;
    dst->i_nb_fields = 1;
    src->i_nb_fields = 1;

    assert(src->p_next == NULL);
    src->p_next = dst;

    /* Field order: src carries the first displayed field, dst the second. */
    if (src->b_progressive || src->b_top_field_first)
    {
        f1->structure = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD;
        f2->structure = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD;
    }
    else
    {
        f1->structure = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD;
        f2->structure = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD;
    }

    src->b_progressive = true;
    dst->b_progressive = true;
    return src;
}
/* Convert one I420 picture to RGB565.  Consumes p_pic; returns a new
 * output picture, or NULL if the input was NULL or allocation failed. */
static picture_t *yuv420_rgb565_filter(filter_t *p_filter, picture_t *p_pic)
{
    if (!p_pic)
        return NULL;

    picture_t *p_out = filter_NewPicture(p_filter);
    if (!p_out)
    {
        picture_Release(p_pic);
        return NULL;
    }

    const int i_width  = p_filter->fmt_in.video.i_width;
    const int i_height = p_filter->fmt_in.video.i_height;

#ifdef HAVE_NEON
    yuv420_2_rgb565_mozilla(p_out->p[0].p_pixels,
                            p_pic->Y_PIXELS, p_pic->U_PIXELS, p_pic->V_PIXELS,
                            i_width, i_height,
                            p_pic->Y_PITCH, p_pic->U_PITCH,
                            p_out->p[0].i_pitch);
#else
    yuv420_2_rgb565(p_out->p[0].p_pixels,
                    p_pic->Y_PIXELS, p_pic->U_PIXELS, p_pic->V_PIXELS,
                    i_width, i_height,
                    p_pic->Y_PITCH, p_pic->U_PITCH,
                    p_out->p[0].i_pitch,
                    yuv2rgb565_table,
                    0 /* no dithering */);
#endif

    picture_CopyProperties(p_out, p_pic);
    picture_Release(p_pic);
    return p_out;
}
/* Convert one YUV 4:4:4 picture to RGB565.  Consumes p_pic; returns a new
 * output picture, or NULL if the input was NULL or allocation failed. */
static picture_t *yuv444_rgb565_filter(filter_t *p_filter, picture_t *p_pic)
{
    if (!p_pic)
        return NULL;

    picture_t *p_out = filter_NewPicture(p_filter);
    if (!p_out)
    {
        picture_Release(p_pic);
        return NULL;
    }

    const int i_width  = p_filter->fmt_in.video.i_width;
    const int i_height = p_filter->fmt_in.video.i_height;

#ifdef HAVE_NEON
    yuv444_2_rgb565_aurora(p_out->p[0].p_pixels,
                           p_pic->Y_PIXELS, p_pic->U_PIXELS, p_pic->V_PIXELS,
                           i_width, i_height,
                           p_pic->Y_PITCH, p_pic->U_PITCH,
                           p_out->p[0].i_pitch);
#else
    yuv444_2_rgb565(p_out->p[0].p_pixels,
                    p_pic->Y_PIXELS, p_pic->U_PIXELS, p_pic->V_PIXELS,
                    i_width, i_height,
                    p_pic->Y_PITCH, p_pic->U_PITCH,
                    p_out->p[0].i_pitch,
                    yuv2rgb565_table,
                    0 /* no dithering */);
#endif

    picture_CopyProperties(p_out, p_pic);
    picture_Release(p_pic);
    return p_out;
}
/* Debanding filter entry point: smooths each plane with filter_plane()
 * using a strength/radius snapshot taken under sys->lock.
 * Consumes src; returns a new picture or NULL on allocation failure.
 *
 * Fix: the per-radius work buffer was reallocated without releasing the
 * previous allocation, leaking memory every time the radius changed. */
static picture_t *Filter(filter_t *filter, picture_t *src)
{
    filter_sys_t *sys = filter->p_sys;

    picture_t *dst = filter_NewPicture(filter);
    if (!dst) {
        picture_Release(src);
        return NULL;
    }

    /* Snapshot the user-tunable parameters under the lock. */
    vlc_mutex_lock(&sys->lock);
    float strength = VLC_CLIP(sys->strength, STRENGTH_MIN, STRENGTH_MAX);
    int radius = VLC_CLIP((sys->radius + 1) & ~1, RADIUS_MIN, RADIUS_MAX);
    vlc_mutex_unlock(&sys->lock);

    const video_format_t *fmt = &filter->fmt_in.video;
    struct vf_priv_s *cfg = &sys->cfg;

    cfg->thresh = (1 << 15) / strength;
    if (cfg->radius != radius) {
        cfg->radius = radius;
        /* Release the buffer sized for the previous radius before
         * allocating a new one; aligned_alloc() memory is released
         * with free().  (Previously this leaked.) */
        free(cfg->buf);
        cfg->buf = aligned_alloc(16,
            (((fmt->i_width + 15) & ~15) * (cfg->radius + 1) / 2 + 32)
                * sizeof (*cfg->buf));
    }

    for (int i = 0; i < dst->i_planes; i++) {
        const plane_t *srcp = &src->p[i];
        plane_t *dstp = &dst->p[i];
        const vlc_chroma_description_t *chroma = sys->chroma;

        /* Plane dimensions and radius scaled by the chroma subsampling. */
        int w = fmt->i_width  * chroma->p[i].w.num / chroma->p[i].w.den;
        int h = fmt->i_height * chroma->p[i].h.num / chroma->p[i].h.den;
        int r = (cfg->radius * chroma->p[i].w.num / chroma->p[i].w.den +
                 cfg->radius * chroma->p[i].h.num / chroma->p[i].h.den) / 2;
        r = VLC_CLIP((r + 1) & ~1, RADIUS_MIN, RADIUS_MAX);

        /* Fall back to a plain copy when the plane is too small or the
         * work buffer could not be allocated. */
        if (__MIN(w, h) > 2 * r && cfg->buf) {
            filter_plane(cfg, dstp->p_pixels, srcp->p_pixels,
                         w, h, dstp->i_pitch, srcp->i_pitch, r);
        } else {
            plane_CopyPixels(dstp, srcp);
        }
    }

    picture_CopyProperties(dst, src);
    picture_Release(src);
    return dst;
}
/*****************************************************************************
 * Do the processing here
 *****************************************************************************
 * Deinterlace one picture through libavcodec's avpicture_deinterlace().
 * Consumes p_pic; returns a new progressive picture or NULL on failure.
 * NOTE(review): avpicture_deinterlace() is a deprecated libavcodec API —
 * confirm against the libavcodec version this module is built against.
 *****************************************************************************/
static picture_t *Deinterlace( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    AVPicture src_pic, dest_pic;
    picture_t *p_pic_dst;
    int i, i_res = -1;

    /* Request output picture */
    p_pic_dst = filter_NewPicture( p_filter );
    if( !p_pic_dst )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Prepare the AVPictures for the conversion: plane pointers and
       line sizes only, no pixel copies. */
    for( i = 0; i < p_pic->i_planes; i++ )
    {
        src_pic.data[i] = p_pic->p[i].p_pixels;
        src_pic.linesize[i] = p_pic->p[i].i_pitch;
    }
    for( i = 0; i < p_pic_dst->i_planes; i++ )
    {
        dest_pic.data[i] = p_pic_dst->p[i].p_pixels;
        dest_pic.linesize[i] = p_pic_dst->p[i].i_pitch;
    }

    i_res = avpicture_deinterlace( &dest_pic, &src_pic,
                                   p_sys->i_src_ffmpeg_chroma,
                                   p_filter->fmt_in.video.i_width,
                                   p_filter->fmt_in.video.i_height );
    if( i_res == -1 )
    {
        msg_Err( p_filter, "deinterlacing picture failed" );
        filter_DeletePicture( p_filter, p_pic_dst );
        picture_Release( p_pic );
        return NULL;
    }
    picture_CopyProperties( p_pic_dst, p_pic );
    p_pic_dst->b_progressive = true; /* output no longer carries fields */
    picture_Release( p_pic );
    return p_pic_dst;
}
/****************************************************************************
 * Filter: the whole thing
 ****************************************************************************
 * Expand RV24 pixels to RV32, reversing the three colour bytes and adding
 * an opaque alpha byte.  Consumes p_pic; returns the converted picture or
 * NULL on allocation failure.
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    /* Request output picture */
    picture_t *p_pic_dst = filter_NewPicture( p_filter );
    if( !p_pic_dst )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Convert RV24 to RV32 */
    for( int i_plane = 0; i_plane < p_pic_dst->i_planes; i_plane++ )
    {
        const unsigned int i_width = p_filter->fmt_out.video.i_width;
        uint8_t *p_src = p_pic->p[i_plane].p_pixels;
        uint8_t *p_dst = p_pic_dst->p[i_plane].p_pixels;
        /* Bytes to skip at the end of each line to reach the next one. */
        const int i_src_skip = p_pic->p[i_plane].i_pitch - 3 * i_width;
        const int i_dst_skip = p_pic_dst->p[i_plane].i_pitch - 4 * i_width;

        for( int i_line = 0; i_line < p_pic_dst->p[i_plane].i_lines; i_line++ )
        {
            for( unsigned int i_col = 0; i_col < i_width; i_col++ )
            {
                /* Reverse the 3 colour bytes, then append opaque alpha. */
                p_dst[0] = p_src[2];
                p_dst[1] = p_src[1];
                p_dst[2] = p_src[0];
                p_dst[3] = 0xff; /* Alpha */
                p_src += 3;
                p_dst += 4;
            }
            p_src += i_src_skip;
            p_dst += i_dst_skip;
        }
    }

    picture_CopyProperties( p_pic_dst, p_pic );
    picture_Release( p_pic );
    return p_pic_dst;
}
/****************************************************************************
 * Filter: the whole thing
 ****************************************************************************
 * This function is called just after the thread is launched.
 * Scales/converts p_pic through the ARM-JIT swscale context.
 * Consumes p_pic on every path; returns a new picture or NULL.
 *
 * Fix: the input picture was leaked on both error paths (Init() failure
 * and filter_NewPicture() failure) — every other Filter callback in this
 * codebase releases the input before returning NULL.
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    uint8_t *src[3];
    int src_stride[3];
    uint8_t *dst[3];
    int dst_stride[3];
    picture_t *p_pic_dst;
    int i_plane;
    int i_nb_planes = p_pic->i_planes;

    /* Check if format properties changed */
    if( Init( p_filter ) != VLC_SUCCESS )
    {
        picture_Release( p_pic ); /* fixed: do not leak the input */
        return NULL;
    }

    /* Request output picture */
    p_pic_dst = filter_NewPicture( p_filter );
    if( !p_pic_dst )
    {
        msg_Warn( p_filter, "can't get output picture" );
        picture_Release( p_pic ); /* fixed: do not leak the input */
        return NULL;
    }

    /* Collect plane pointers and strides for the scaler (3 planes max). */
    for( i_plane = 0; i_plane < __MIN(3, p_pic->i_planes); i_plane++ )
    {
        src[i_plane] = p_pic->p[i_plane].p_pixels;
        src_stride[i_plane] = p_pic->p[i_plane].i_pitch;
    }
    /* NOTE(review): the destination loop is bounded by the *input* plane
       count — confirm in/out plane counts always match for the chromas
       this module accepts. */
    for( i_plane = 0; i_plane < __MIN(3, i_nb_planes); i_plane++ )
    {
        dst[i_plane] = p_pic_dst->p[i_plane].p_pixels;
        dst_stride[i_plane] = p_pic_dst->p[i_plane].i_pitch;
    }

    sws_arm_jit_scale( p_sys->ctx, src, src_stride, 0,
                       p_filter->fmt_in.video.i_height, dst, dst_stride );

    picture_CopyProperties( p_pic_dst, p_pic );
    picture_Release( p_pic );
    return p_pic_dst;
}
/* Apply the per-plane operation sys->dsc->planar to every plane of src.
 * Consumes src; returns a new picture or NULL on allocation failure. */
static picture_t *Filter(filter_t *filter, picture_t *src)
{
    filter_sys_t *sys = filter->p_sys;

    picture_t *dst = filter_NewPicture(filter);
    if (dst == NULL) {
        picture_Release(src);
        return NULL;
    }

    const vlc_chroma_description_t *chroma = sys->chroma;
    if (chroma->plane_count >= 3) {
        /* Planar formats: each plane is processed independently. */
        for (unsigned plane = 0; plane < chroma->plane_count; plane++)
            sys->dsc->planar(&dst->p[plane], &src->p[plane]);
    } else {
        /* TODO: packed formats are not handled yet. */
    }

    picture_CopyProperties(dst, src);
    picture_Release(src);
    return dst;
}
/****************************************************************************
 * Filter: the whole thing
 ****************************************************************************
 * This function is called just after the thread is launched.
 * Scale/convert one picture through swscale, with optional edge extension
 * (i_extend_factor), U/V plane swapping, and separate alpha-plane handling
 * (the alpha channel is rescaled through its own context ctxA, or filled
 * opaque when the output gains an alpha channel the input lacks).
 * Consumes p_pic; returns a new picture or NULL.
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    const video_format_t *p_fmti = &p_filter->fmt_in.video;
    const video_format_t *p_fmto = &p_filter->fmt_out.video;
    picture_t *p_pic_dst;

    /* Check if format properties changed */
    if( Init( p_filter ) )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Request output picture */
    p_pic_dst = filter_NewPicture( p_filter );
    if( !p_pic_dst )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* When extending, work on the pre-allocated padded intermediates
       instead of the in/out pictures directly. */
    picture_t *p_src = p_pic;
    picture_t *p_dst = p_pic_dst;
    if( p_sys->i_extend_factor != 1 )
    {
        p_src = p_sys->p_src_e;
        p_dst = p_sys->p_dst_e;

        CopyPad( p_src, p_pic );
    }

    /* Main conversion: plain copy, U/V swap only, or full swscale. */
    if( p_sys->b_copy && p_sys->b_swap_uvi == p_sys->b_swap_uvo )
        picture_CopyPixels( p_dst, p_src );
    else if( p_sys->b_copy )
        SwapUV( p_dst, p_src );
    else
        Convert( p_filter, p_sys->ctx, p_dst, p_src, p_fmti->i_height, 0, 3,
                 p_sys->b_swap_uvi, p_sys->b_swap_uvo );

    if( p_sys->ctxA )
    {
        /* We extract the A plane to rescale it, and then we reinject it. */
        if( p_fmti->i_chroma == VLC_CODEC_RGBA )
            ExtractA( p_sys->p_src_a, p_src,
                      p_fmti->i_width * p_sys->i_extend_factor,
                      p_fmti->i_height );
        else
            plane_CopyPixels( p_sys->p_src_a->p, p_src->p+A_PLANE );

        Convert( p_filter, p_sys->ctxA, p_sys->p_dst_a, p_sys->p_src_a,
                 p_fmti->i_height, 0, 1, false, false );
        if( p_fmto->i_chroma == VLC_CODEC_RGBA )
            InjectA( p_dst, p_sys->p_dst_a,
                     p_fmto->i_width * p_sys->i_extend_factor,
                     p_fmto->i_height );
        else
            plane_CopyPixels( p_dst->p+A_PLANE, p_sys->p_dst_a->p );
    }
    else if( p_sys->b_add_a )
    {
        /* We inject a complete opaque alpha plane */
        if( p_fmto->i_chroma == VLC_CODEC_RGBA )
            FillA( &p_dst->p[0], OFFSET_A );
        else
            FillA( &p_dst->p[A_PLANE], 0 );
    }

    /* Crop the padded intermediate back into the real output picture. */
    if( p_sys->i_extend_factor != 1 )
    {
        picture_CopyPixels( p_pic_dst, p_dst );
    }

    picture_CopyProperties( p_pic_dst, p_pic );
    picture_Release( p_pic );
    return p_pic_dst;
}
/**
 * It creates multiples pictures from the source one: each active cell of
 * the i_row x i_col wall gets its own cropped, black-bordered and
 * luminance-attenuated copy of the corresponding region of p_src.
 * Consumes p_src; the output pictures are returned through pp_dst.
 * Returns VLC_SUCCESS, or VLC_EGENERIC if the outputs can't be allocated.
 */
static int Filter( video_splitter_t *p_splitter,
                   picture_t *pp_dst[], picture_t *p_src )
{
    video_splitter_sys_t *p_sys = p_splitter->p_sys;

    if( video_splitter_NewPicture( p_splitter, pp_dst ) )
    {
        picture_Release( p_src );
        return VLC_EGENERIC;
    }

    for( int y = 0; y < p_sys->i_row; y++ )
    {
        for( int x = 0; x < p_sys->i_col; x++ )
        {
            const panoramix_output_t *p_output = &p_sys->pp_output[x][y];
            if( !p_output->b_active )
                continue;

            /* Destination picture for this wall cell */
            picture_t *p_dst = pp_dst[p_output->i_output];

            /* Propagate date/flags to the output */
            picture_CopyProperties( p_dst, p_src );

            /* Crop and filter each plane */
            for( int i_plane = 0; i_plane < p_src->i_planes; i_plane++ )
            {
                const int i_div_w = p_sys->p_chroma->pi_div_w[i_plane];
                const int i_div_h = p_sys->p_chroma->pi_div_h[i_plane];

                if( !i_div_w || !i_div_h )
                    continue;

                const plane_t *p_srcp = &p_src->p[i_plane];
                const plane_t *p_dstp = &p_dst->p[i_plane];

                /* Border/attenuation widths scaled to this plane's
                   chroma subsampling */
                panoramix_filter_t filter;
                filter.black.i_right  = p_output->filter.black.i_right / i_div_w;
                filter.black.i_left   = p_output->filter.black.i_left / i_div_w;
                filter.black.i_top    = p_output->filter.black.i_top / i_div_h;
                filter.black.i_bottom = p_output->filter.black.i_bottom / i_div_h;

                filter.attenuate.i_right  = p_output->filter.attenuate.i_right / i_div_w;
                filter.attenuate.i_left   = p_output->filter.attenuate.i_left / i_div_w;
                filter.attenuate.i_top    = p_output->filter.attenuate.i_top / i_div_h;
                filter.attenuate.i_bottom = p_output->filter.attenuate.i_bottom / i_div_h;

                /* Source offset of this cell, in plane coordinates */
                const int i_x = p_output->i_src_x/i_div_w;
                const int i_y = p_output->i_src_y/i_div_h;

                assert( p_sys->p_chroma->b_planar );
                FilterPlanar( p_dstp->p_pixels, p_dstp->i_pitch,
                              &p_srcp->p_pixels[i_y * p_srcp->i_pitch + i_x * p_srcp->i_pixel_pitch],
                              p_srcp->i_pitch,
                              p_output->i_src_width/i_div_w,
                              p_output->i_src_height/i_div_h,
                              p_sys->p_chroma->pi_black[i_plane],
                              &filter,
                              p_sys->p_lut[i_plane],
                              p_sys->lambdav[i_plane],
                              p_sys->lambdah[i_plane] );
            }
        }
    }

    picture_Release( p_src );
    return VLC_SUCCESS;
}
/**
 * Copy a picture: duplicates both the pixel planes and the picture
 * properties (date, flags, field information, ...) from \p p_src
 * into \p p_dst.
 * NOTE(review): assumes the two pictures have compatible formats and
 * plane layouts — confirm against picture_CopyPixels() requirements.
 */
void picture_Copy( picture_t *p_dst, const picture_t *p_src )
{
    picture_CopyPixels( p_dst, p_src );

    picture_CopyProperties( p_dst, p_src );
}
/*****************************************************************************
 * Filter: displays previously rendered output
 *****************************************************************************
 * This function send the currently rendered image to the internal opencv
 * filter for processing.  Depending on i_wrapper_output, the returned
 * picture is the untouched input (VINPUT), the OpenCV-processed image
 * (PROCESSED), or NULL (NONE, picture swallowed).
 * Consumes p_pic on every path.
 *****************************************************************************/
static picture_t* Filter( filter_t* p_filter, picture_t* p_pic )
{
    picture_t* p_outpic = filter_NewPicture( p_filter );
    if( p_outpic == NULL ) {
        msg_Err( p_filter, "couldn't get a p_outpic!" );
        picture_Release( p_pic );
        return NULL;
    }

    video_format_t fmt_out;

    // Make a copy if we want to show the original input
    if (p_filter->p_sys->i_wrapper_output == VINPUT)
        picture_Copy( p_outpic, p_pic );

    VlcPictureToIplImage( p_filter, p_pic );
    // Pass the image (as a pointer to the first IplImage*) to the
    // internal OpenCV filter for processing.
    p_filter->p_sys->p_opencv->pf_video_filter( p_filter->p_sys->p_opencv,
                        (picture_t*)&(p_filter->p_sys->p_cv_image[0]) );

    if(p_filter->p_sys->i_wrapper_output == PROCESSED) {
        // Processed video
        if( (p_filter->p_sys->p_proc_image) &&
            (p_filter->p_sys->p_proc_image->i_planes > 0) &&
            (p_filter->p_sys->i_internal_chroma != CINPUT) ) {
            //p_filter->p_sys->p_proc_image->format.i_chroma = VLC_CODEC_RGB24;

            // Convert the processed image back to the output chroma.
            memset( &fmt_out, 0, sizeof(video_format_t) );
            fmt_out = p_pic->format;
            //picture_Release( p_outpic );

            /*
             * We have to copy out the image from image_Convert(), otherwise
             * you leak pictures for some reason:
             * main video output error:
             *  pictures leaked, trying to workaround
             */
            picture_t* p_outpic_tmp = image_Convert(
                        p_filter->p_sys->p_image,
                        p_filter->p_sys->p_proc_image,
                        &(p_filter->p_sys->p_proc_image->format),
                        &fmt_out );

            picture_CopyPixels( p_outpic, p_outpic_tmp );
            CopyInfoAndRelease( p_outpic, p_outpic_tmp );
        } else if( p_filter->p_sys->i_internal_chroma == CINPUT ) {
            // Same chroma as the input: plain copy is enough.
            picture_CopyPixels( p_outpic, p_filter->p_sys->p_proc_image );
            picture_CopyProperties( p_outpic,
                                    p_filter->p_sys->p_proc_image );
        }
    }

    ReleaseImages( p_filter );
    picture_Release( p_pic );

#ifndef NDEBUG
    msg_Dbg( p_filter, "Filter() done" );
#endif

    if( p_filter->p_sys->i_wrapper_output != NONE ) {
        return p_outpic;
    } else { // NONE
        picture_Release( p_outpic );
        return NULL;
    }
}
/* This is the filter function. See Open().
 *
 * Main deinterlacing dispatcher: requests output picture(s), maintains
 * the input frame and metadata histories, renders with the configured
 * algorithm, and assigns output timestamps.  Framerate-doubling modes
 * may produce up to DEINTERLACE_DST_SIZE chained output pictures.
 * Consumes p_pic; returns the head of the output chain or NULL. */
picture_t *Deinterlace( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    picture_t *p_dst[DEINTERLACE_DST_SIZE];

    /* Request output picture */
    p_dst[0] = filter_NewPicture( p_filter );
    if( p_dst[0] == NULL )
    {
        picture_Release( p_pic );
        return NULL;
    }
    picture_CopyProperties( p_dst[0], p_pic );

    /* Any unused p_dst pointers must be NULL, because they are used to
       check how many output frames we have. */
    for( int i = 1; i < DEINTERLACE_DST_SIZE; ++i )
        p_dst[i] = NULL;

    /* Update the input frame history, if the currently active algorithm
       needs it. */
    if( p_sys->b_use_frame_history )
    {
        /* Duplicate the picture
         * TODO when the vout rework is finished, picture_Hold() might be enough
         * but becarefull, the pitches must match */
        picture_t *p_dup = picture_NewFromFormat( &p_pic->format );
        if( p_dup )
            picture_Copy( p_dup, p_pic );
        /* NOTE(review): on duplication failure a NULL entry is slid into
           the history; the algorithms are assumed to tolerate that. */

        /* Slide the history */
        if( p_sys->pp_history[0] )
            picture_Release( p_sys->pp_history[0] );
        for( int i = 1; i < HISTORY_SIZE; i++ )
            p_sys->pp_history[i-1] = p_sys->pp_history[i];
        p_sys->pp_history[HISTORY_SIZE-1] = p_dup;
    }

    /* Slide the metadata history. */
    for( int i = 1; i < METADATA_SIZE; i++ )
    {
        p_sys->meta.pi_date[i-1]            = p_sys->meta.pi_date[i];
        p_sys->meta.pi_nb_fields[i-1]       = p_sys->meta.pi_nb_fields[i];
        p_sys->meta.pb_top_field_first[i-1] = p_sys->meta.pb_top_field_first[i];
    }
    /* The last element corresponds to the current input frame. */
    p_sys->meta.pi_date[METADATA_SIZE-1]            = p_pic->date;
    p_sys->meta.pi_nb_fields[METADATA_SIZE-1]       = p_pic->i_nb_fields;
    p_sys->meta.pb_top_field_first[METADATA_SIZE-1] = p_pic->b_top_field_first;

    /* Remember the frame offset that we should use for this frame.
       The value in p_sys will be updated to reflect the correct value
       for the *next* frame when we call the renderer. */
    int i_frame_offset = p_sys->i_frame_offset;
    int i_meta_idx     = (METADATA_SIZE-1) - i_frame_offset;

    /* These correspond to the current *outgoing* frame. */
    bool b_top_field_first;
    int i_nb_fields;
    if( i_frame_offset != CUSTOM_PTS )
    {
        /* Pick the correct values from the history. */
        b_top_field_first = p_sys->meta.pb_top_field_first[i_meta_idx];
        i_nb_fields       = p_sys->meta.pi_nb_fields[i_meta_idx];
    }
    else
    {
        /* Framerate doublers must not request CUSTOM_PTS, as they need the
           original field timings, and need Deinterlace() to allocate the
           correct number of output frames. */
        assert( !p_sys->b_double_rate );

        /* NOTE: i_nb_fields is only used for framerate doublers, so it is
                 unused in this case. b_top_field_first is only passed to the
                 algorithm. We assume that algorithms that request CUSTOM_PTS
                 will, if necessary, extract the TFF/BFF information
                 themselves. */
        b_top_field_first = p_pic->b_top_field_first; /* this is not
                                                         guaranteed to be
                                                         meaningful */
        i_nb_fields       = p_pic->i_nb_fields;       /* unused */
    }

    /* For framerate doublers, determine field duration and allocate
       output frames. */
    mtime_t i_field_dur = 0;
    int i_double_rate_alloc_end = 0; /* One past last for allocated output
                                        frames in p_dst[]. Used only for
                                        framerate doublers. Will be inited
                                        below. Declared here because the
                                        PTS logic needs the result. */
    if( p_sys->b_double_rate )
    {
        /* Calculate one field duration. */
        int i = 0;
        int iend = METADATA_SIZE-1;
        /* Find oldest valid logged date.
           The current input frame doesn't count. */
        for( ; i < iend; i++ )
            if( p_sys->meta.pi_date[i] > VLC_TS_INVALID )
                break;
        if( i < iend )
        {
            /* Count how many fields the valid history entries
               (except the new frame) represent. */
            int i_fields_total = 0;
            for( int j = i ; j < iend; j++ )
                i_fields_total += p_sys->meta.pi_nb_fields[j];
            /* One field took this long. */
            i_field_dur = (p_pic->date - p_sys->meta.pi_date[i]) / i_fields_total;
        }
        /* Note that we default to field duration 0 if it could not be
           determined. This behaves the same as the old code - leaving the
           extra output frame dates the same as p_pic->date if the last
           cached date was not valid. */

        i_double_rate_alloc_end = i_nb_fields;
        if( i_nb_fields > DEINTERLACE_DST_SIZE )
        {
            /* Note that the effective buffer size depends also on the
               constant private_picture in vout_wrapper.c, since that
               determines the maximum number of output pictures
               filter_NewPicture() will successfully allocate for one
               input frame. */
            msg_Err( p_filter, "Framerate doubler: output buffer too small; "\
                               "fields = %d, buffer size = %d. Dropping the "\
                               "remaining fields.",
                               i_nb_fields, DEINTERLACE_DST_SIZE );
            i_double_rate_alloc_end = DEINTERLACE_DST_SIZE;
        }

        /* Allocate output frames. */
        for( int i = 1; i < i_double_rate_alloc_end ; ++i )
        {
            p_dst[i-1]->p_next =
            p_dst[i]           = filter_NewPicture( p_filter );
            if( p_dst[i] )
            {
                picture_CopyProperties( p_dst[i], p_pic );
            }
            else
            {
                msg_Err( p_filter, "Framerate doubler: could not allocate "\
                                   "output frame %d", i+1 );
                i_double_rate_alloc_end = i; /* Inform the PTS logic about
                                                the correct end position. */
                break; /* If this happens, the rest of the allocations
                          aren't likely to work, either... */
            }
        }
        /* Now we have allocated *up to* the correct number of frames;
           normally, exactly the correct number. Upon alloc failure,
           we may have succeeded in allocating *some* output frames,
           but fewer than were desired. In such a case, as many will
           be rendered as were successfully allocated.

           Note that now p_dst[i] != NULL
           for 0 <= i < i_double_rate_alloc_end. */
    }
    assert( p_sys->b_double_rate || p_dst[1] == NULL );
    assert( i_nb_fields > 2 || p_dst[2] == NULL );

    /* Render */
    switch( p_sys->i_mode )
    {
        case DEINTERLACE_DISCARD:
            RenderDiscard( p_dst[0], p_pic, 0 );
            break;

        case DEINTERLACE_BOB:
            RenderBob( p_dst[0], p_pic, !b_top_field_first );
            if( p_dst[1] )
                RenderBob( p_dst[1], p_pic, b_top_field_first );
            if( p_dst[2] )
                RenderBob( p_dst[2], p_pic, !b_top_field_first );
            break;;

        case DEINTERLACE_LINEAR:
            RenderLinear( p_filter, p_dst[0], p_pic, !b_top_field_first );
            if( p_dst[1] )
                RenderLinear( p_filter, p_dst[1], p_pic, b_top_field_first );
            if( p_dst[2] )
                RenderLinear( p_filter, p_dst[2], p_pic, !b_top_field_first );
            break;

        case DEINTERLACE_MEAN:
            RenderMean( p_filter, p_dst[0], p_pic );
            break;

        case DEINTERLACE_BLEND:
            RenderBlend( p_filter, p_dst[0], p_pic );
            break;

        case DEINTERLACE_X:
            RenderX( p_dst[0], p_pic );
            break;

        case DEINTERLACE_YADIF:
            if( RenderYadif( p_filter, p_dst[0], p_pic, 0, 0 ) )
                goto drop;
            break;

        case DEINTERLACE_YADIF2X:
            if( RenderYadif( p_filter, p_dst[0], p_pic, 0, !b_top_field_first ) )
                goto drop;
            if( p_dst[1] )
                RenderYadif( p_filter, p_dst[1], p_pic, 1, b_top_field_first );
            if( p_dst[2] )
                RenderYadif( p_filter, p_dst[2], p_pic, 2, !b_top_field_first );
            break;

        case DEINTERLACE_PHOSPHOR:
            if( RenderPhosphor( p_filter, p_dst[0], 0, !b_top_field_first ) )
                goto drop;
            if( p_dst[1] )
                RenderPhosphor( p_filter, p_dst[1], 1, b_top_field_first );
            if( p_dst[2] )
                RenderPhosphor( p_filter, p_dst[2], 2, !b_top_field_first );
            break;

        case DEINTERLACE_IVTC:
            /* Note: RenderIVTC will automatically drop the duplicate frames
               produced by IVTC. This is part of normal operation. */
            if( RenderIVTC( p_filter, p_dst[0] ) )
                goto drop;
            break;
    }

    /* Set output timestamps, if the algorithm didn't request CUSTOM_PTS
       for this frame. */
    assert( i_frame_offset <= METADATA_SIZE || i_frame_offset == CUSTOM_PTS );
    if( i_frame_offset != CUSTOM_PTS )
    {
        mtime_t i_base_pts = p_sys->meta.pi_date[i_meta_idx];

        /* Note: in the usual case (i_frame_offset = 0 and
           b_double_rate = false), this effectively does nothing.
           This is needed to correct the timestamp
           when i_frame_offset > 0. */
        p_dst[0]->date = i_base_pts;

        if( p_sys->b_double_rate )
        {
            /* Processing all actually allocated output frames. */
            for( int i = 1; i < i_double_rate_alloc_end; ++i )
            {
                /* XXX it's not really good especially for the first picture, but
                 * I don't think that delaying by one frame is worth it */
                if( i_base_pts > VLC_TS_INVALID )
                    p_dst[i]->date = i_base_pts + i * i_field_dur;
                else
                    p_dst[i]->date = VLC_TS_INVALID;
            }
        }
    }

    /* Outputs are progressive full frames. */
    for( int i = 0; i < DEINTERLACE_DST_SIZE; ++i )
    {
        if( p_dst[i] )
        {
            p_dst[i]->b_progressive = true;
            p_dst[i]->i_nb_fields = 2;
        }
    }

    picture_Release( p_pic );
    return p_dst[0];

drop:
    picture_Release( p_dst[0] );
    for( int i = 1; i < DEINTERLACE_DST_SIZE; ++i )
    {
        if( p_dst[i] )
            picture_Release( p_dst[i] );
    }
    picture_Release( p_pic );
    return NULL;
}
/****************************************************************************
 * Filter: the whole thing
 ****************************************************************************
 * Expand a paletted YUVP picture to YUVA or RGBA by palette lookup.
 * Consumes p_pic; returns a new picture or NULL on allocation failure.
 *
 * Fix: the YUVA branch rejected palette indices with `v > i_entries`,
 * which let v == i_entries through and read one entry past the end of
 * the palette.  It now uses `>=`, matching the RGBA branch.
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_out;

    if( !p_pic )
        return NULL;

    const video_palette_t *p_yuvp = p_filter->fmt_in.video.p_palette;

    assert( p_yuvp != NULL );
    assert( p_filter->fmt_in.video.i_chroma == VLC_CODEC_YUVP );
    assert( p_filter->fmt_in.video.i_width == p_filter->fmt_out.video.i_width );
    assert( p_filter->fmt_in.video.i_height == p_filter->fmt_out.video.i_height );

    /* Request output picture */
    p_out = filter_NewPicture( p_filter );
    if( !p_out )
    {
        picture_Release( p_pic );
        return NULL;
    }

    if( p_filter->fmt_out.video.i_chroma == VLC_CODEC_YUVA )
    {
        for( unsigned int y = 0; y < p_filter->fmt_in.video.i_height; y++ )
        {
            const uint8_t *p_line = &p_pic->p->p_pixels[y*p_pic->p->i_pitch];
            uint8_t *p_y = &p_out->Y_PIXELS[y*p_out->Y_PITCH];
            uint8_t *p_u = &p_out->U_PIXELS[y*p_out->U_PITCH];
            uint8_t *p_v = &p_out->V_PIXELS[y*p_out->V_PITCH];
            uint8_t *p_a = &p_out->A_PIXELS[y*p_out->A_PITCH];

            for( unsigned int x = 0; x < p_filter->fmt_in.video.i_width; x++ )
            {
                const int v = p_line[x];

                /* Skip out-of-palette indices (fixed off-by-one: must be
                   >=, palette[i_entries] is out of bounds). */
                if( v >= p_yuvp->i_entries )  /* maybe assert ? */
                    continue;

                p_y[x] = p_yuvp->palette[v][0];
                p_u[x] = p_yuvp->palette[v][1];
                p_v[x] = p_yuvp->palette[v][2];
                p_a[x] = p_yuvp->palette[v][3];
            }
        }
    }
    else
    {
        assert( p_filter->fmt_out.video.i_chroma == VLC_CODEC_RGBA );

        /* Create a RGBA palette */
        video_palette_t rgbp;
        rgbp.i_entries = p_yuvp->i_entries;
        for( int i = 0; i < p_yuvp->i_entries; i++ )
        {
            Yuv2Rgb( &rgbp.palette[i][0], &rgbp.palette[i][1],
                     &rgbp.palette[i][2],
                     p_yuvp->palette[i][0], p_yuvp->palette[i][1],
                     p_yuvp->palette[i][2] );
            rgbp.palette[i][3] = p_yuvp->palette[i][3];
        }

        /* Look up every pixel through the RGBA palette. */
        for( unsigned int y = 0; y < p_filter->fmt_in.video.i_height; y++ )
        {
            const uint8_t *p_line = &p_pic->p->p_pixels[y*p_pic->p->i_pitch];
            uint8_t *p_rgba = &p_out->p->p_pixels[y*p_out->p->i_pitch];

            for( unsigned int x = 0; x < p_filter->fmt_in.video.i_width; x++ )
            {
                const int v = p_line[x];

                if( v >= rgbp.i_entries )  /* maybe assert ? */
                    continue;

                p_rgba[4*x+0] = rgbp.palette[v][0];
                p_rgba[4*x+1] = rgbp.palette[v][1];
                p_rgba[4*x+2] = rgbp.palette[v][2];
                p_rgba[4*x+3] = rgbp.palette[v][3];
            }
        }
    }

    picture_CopyProperties( p_out, p_pic );
    picture_Release( p_pic );
    return p_out;
}
/**
 * Common deinterlacing driver: requests output picture(s), maintains the
 * frame/metadata histories, dispatches to the configured render callback
 * and computes output timestamps.  Framerate-doubling modes may produce
 * up to DEINTERLACE_DST_SIZE chained output pictures.
 * Consumes p_pic; returns the head of the output chain or NULL.
 *
 * Fix: in the framerate-doubling path the third output frame was rendered
 * into p_dst[1] instead of p_dst[2], clobbering the second field and
 * leaving p_dst[2] with unrendered contents.
 */
picture_t *DoDeinterlacing( filter_t *p_filter,
                            struct deinterlace_ctx *p_context, picture_t *p_pic )
{
    picture_t *p_dst[DEINTERLACE_DST_SIZE];
    int i_double_rate_alloc_end;
    /* Remember the frame offset that we should use for this frame.
       The value in p_sys will be updated to reflect the correct value
       for the *next* frame when we call the renderer. */
    int i_frame_offset;
    int i_meta_idx;
    bool b_top_field_first;

    /* Request output picture */
    p_dst[0] = filter_NewPicture( p_filter );
    if( p_dst[0] == NULL )
    {
        picture_Release( p_pic );
        return NULL;
    }
    picture_CopyProperties( p_dst[0], p_pic );

    /* Any unused p_dst pointers must be NULL, because they are used to
       check how many output frames we have. */
    for( int i = 1; i < DEINTERLACE_DST_SIZE; ++i )
        p_dst[i] = NULL;

    /* Update the input frame history, if the currently active algorithm
       needs it. */
    if( p_context->settings.b_use_frame_history )
    {
        /* Keep reference for the picture */
        picture_t *p_dup = picture_Hold( p_pic );

        /* Slide the history */
        if( p_context->pp_history[0] )
            picture_Release( p_context->pp_history[0] );
        for( int i = 1; i < HISTORY_SIZE; i++ )
            p_context->pp_history[i-1] = p_context->pp_history[i];
        p_context->pp_history[HISTORY_SIZE-1] = p_dup;
    }

    /* Slide the metadata history. */
    for( int i = 1; i < METADATA_SIZE; i++ )
        p_context->meta[i-1] = p_context->meta[i];
    /* The last element corresponds to the current input frame. */
    p_context->meta[METADATA_SIZE-1].pi_date = p_pic->date;
    p_context->meta[METADATA_SIZE-1].pi_nb_fields = p_pic->i_nb_fields;
    p_context->meta[METADATA_SIZE-1].pb_top_field_first = p_pic->b_top_field_first;

    i_frame_offset = p_context->i_frame_offset;
    i_meta_idx     = (METADATA_SIZE-1) - i_frame_offset;

    int i_nb_fields;

    /* These correspond to the current *outgoing* frame. */
    if( i_frame_offset != CUSTOM_PTS )
    {
        /* Pick the correct values from the history. */
        b_top_field_first = p_context->meta[i_meta_idx].pb_top_field_first;
        i_nb_fields       = p_context->meta[i_meta_idx].pi_nb_fields;
    }
    else
    {
        /* Framerate doublers must not request CUSTOM_PTS, as they need the
           original field timings, and need Deinterlace() to allocate the
           correct number of output frames. */
        assert( !p_context->settings.b_double_rate );

        /* NOTE: i_nb_fields is only used for framerate doublers, so it is
                 unused in this case. b_top_field_first is only passed to the
                 algorithm. We assume that algorithms that request CUSTOM_PTS
                 will, if necessary, extract the TFF/BFF information
                 themselves. */
        b_top_field_first = p_pic->b_top_field_first; /* this is not
                                                         guaranteed to be
                                                         meaningful */
        i_nb_fields       = p_pic->i_nb_fields;       /* unused */
    }

    /* For framerate doublers, allocate the extra output frames. */
    i_double_rate_alloc_end = 0; /* One past last for allocated output
                                    frames in p_dst[]. Used only for
                                    framerate doublers. Declared here
                                    because the PTS logic needs the
                                    result. */
    if( p_context->settings.b_double_rate )
    {
        i_double_rate_alloc_end = i_nb_fields;
        if( i_nb_fields > DEINTERLACE_DST_SIZE )
        {
            /* Note that the effective buffer size depends also on the
               constant private_picture in vout_wrapper.c, since that
               determines the maximum number of output pictures
               filter_NewPicture() will successfully allocate for one
               input frame. */
            msg_Err( p_filter, "Framerate doubler: output buffer too small; "\
                               "fields = %d, buffer size = %d. Dropping the "\
                               "remaining fields.",
                               i_nb_fields, DEINTERLACE_DST_SIZE );
            i_double_rate_alloc_end = DEINTERLACE_DST_SIZE;
        }

        /* Allocate output frames. */
        for( int i = 1; i < i_double_rate_alloc_end ; ++i )
        {
            p_dst[i-1]->p_next =
            p_dst[i]           = filter_NewPicture( p_filter );
            if( p_dst[i] )
            {
                picture_CopyProperties( p_dst[i], p_pic );
            }
            else
            {
                msg_Err( p_filter, "Framerate doubler: could not allocate "\
                                   "output frame %d", i+1 );
                i_double_rate_alloc_end = i; /* Inform the PTS logic about
                                                the correct end position. */
                break; /* If this happens, the rest of the allocations
                          aren't likely to work, either... */
            }
        }
        /* Note that now p_dst[i] != NULL
           for 0 <= i < i_double_rate_alloc_end. */
    }
    assert( p_context->settings.b_double_rate || p_dst[1] == NULL );
    assert( i_nb_fields > 2 || p_dst[2] == NULL );

    /* Render */
    if ( !p_context->settings.b_double_rate )
    {
        /* Note: RenderIVTC will automatically drop the duplicate frames
           produced by IVTC. This is part of normal operation. */
        if ( p_context->pf_render_single_pic( p_filter, p_dst[0], p_pic ) )
            goto drop;
    }
    else
    {
        if ( p_context->pf_render_ordered( p_filter, p_dst[0], p_pic,
                                           0, !b_top_field_first ) )
            goto drop;
        if ( p_dst[1] )
            p_context->pf_render_ordered( p_filter, p_dst[1], p_pic,
                                          1, b_top_field_first );
        /* Fixed: render the third field into p_dst[2] (the old code wrote
           into p_dst[1] again, cf. the YADIF2X reference implementation). */
        if ( p_dst[2] )
            p_context->pf_render_ordered( p_filter, p_dst[2], p_pic,
                                          2, !b_top_field_first );
    }

    if ( p_context->settings.b_custom_pts )
    {
        assert(p_context->settings.b_use_frame_history);
        if( p_context->pp_history[0] && p_context->pp_history[1] )
        {
            /* The next frame will get a custom timestamp, too. */
            p_context->i_frame_offset = CUSTOM_PTS;
        }
        else if( !p_context->pp_history[0] && !p_context->pp_history[1] )
        {
            /* first frame: nothing to do yet */
        }
        else /* second frame */
        {
            /* At the next frame, the filter starts. The next frame will get
               a custom timestamp. */
            p_context->i_frame_offset = CUSTOM_PTS;
        }
    }

    /* Set output timestamps, if the algorithm didn't request CUSTOM_PTS
       for this frame. */
    assert( i_frame_offset <= METADATA_SIZE || i_frame_offset == CUSTOM_PTS );
    if( i_frame_offset != CUSTOM_PTS )
    {
        mtime_t i_base_pts = p_context->meta[i_meta_idx].pi_date;

        /* Note: in the usual case (i_frame_offset = 0 and
           b_double_rate = false), this effectively does nothing.
           This is needed to correct the timestamp
           when i_frame_offset > 0. */
        p_dst[0]->date = i_base_pts;

        if( p_context->settings.b_double_rate )
        {
            mtime_t i_field_dur = GetFieldDuration( p_context,
                                                    &p_filter->fmt_out.video,
                                                    p_pic );
            /* Processing all actually allocated output frames. */
            for( int i = 1; i < i_double_rate_alloc_end; ++i )
            {
                /* XXX it's not really good especially for the first picture,
                 * but I don't think that delaying by one frame is worth it */
                if( i_base_pts > VLC_TS_INVALID )
                    p_dst[i]->date = i_base_pts + i * i_field_dur;
                else
                    p_dst[i]->date = VLC_TS_INVALID;
            }
        }
    }

    /* Outputs are progressive full frames. */
    for( int i = 0; i < DEINTERLACE_DST_SIZE; ++i )
    {
        if( p_dst[i] )
        {
            p_dst[i]->b_progressive = true;
            p_dst[i]->i_nb_fields = 2;
        }
    }

    picture_Release( p_pic );
    return p_dst[0];

drop:
    picture_Release( p_dst[0] );
    for( int i = 1; i < DEINTERLACE_DST_SIZE; ++i )
    {
        if( p_dst[i] )
            picture_Release( p_dst[i] );
    }
    picture_Release( p_pic );
    return NULL;
}