/**
 * Deinterlace one output picture.
 *
 * For each plane, lines whose parity matches the field being reconstructed
 * are interpolated with yadif->filter_line(); lines of the opposite parity
 * are copied verbatim from the current input frame.
 *
 * @param ctx    filter context holding the YADIFContext state
 * @param dstpic destination picture to fill
 * @param parity parity (0/1) of the field to interpolate
 * @param tff    non-zero if the top field is first
 */
static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int plane, line;

    for (plane = 0; plane < yadif->csp->nb_components; plane++) {
        int width  = dstpic->video->w;
        int height = dstpic->video->h;
        int stride = yadif->cur->linesize[plane];
        int bytes_per_comp = (yadif->csp->comp[plane].depth_minus1 + 8) / 8;

        /* Chroma planes are subsampled; shrink the per-plane dimensions. */
        if (plane == 1 || plane == 2) {
            width  >>= yadif->csp->log2_chroma_w;
            height >>= yadif->csp->log2_chroma_h;
        }

        for (line = 0; line < height; line++) {
            if ((line ^ parity) & 1) {
                /* This line belongs to the field being interpolated. */
                uint8_t *prev = &yadif->prev->data[plane][line * stride];
                uint8_t *cur  = &yadif->cur ->data[plane][line * stride];
                uint8_t *next = &yadif->next->data[plane][line * stride];
                uint8_t *dst  = &dstpic->data[plane][line * dstpic->linesize[plane]];
                /* NOTE(review): mode 2 is forced on the lines adjacent to the
                 * frame borders; the exact meaning of the mode value lives in
                 * filter_line(), not visible here. */
                int mode  = line == 1 || line + 2 == height ? 2 : yadif->mode;
                /* Offsets to the next/previous line of the same field,
                 * mirrored at the frame borders. */
                int below = line + 1 < height ?  stride : -stride;
                int above = line              ? -stride :  stride;

                yadif->filter_line(dst, prev, cur, next, width,
                                   below, above, parity ^ tff, mode);
            } else {
                /* Opposite-parity line: keep the current frame's pixels. */
                memcpy(&dstpic->data[plane][line * dstpic->linesize[plane]],
                       &yadif->cur->data[plane][line * stride],
                       width * bytes_per_comp);
            }
        }
    }
}
/**
 * Deinterlace one output picture.
 *
 * Lines of the requested parity are interpolated with yadif->filter_line();
 * the others are copied from the current frame. Near the top/bottom borders
 * the reachable neighbor lines are first copied into a padded scratch buffer
 * (yadif->temp_line) so filter_line() cannot read outside the frame.
 *
 * @param ctx    filter context holding the YADIFContext state
 * @param dstpic destination picture to fill
 * @param parity parity (0/1) of the field to interpolate
 * @param tff    non-zero if the top field is first
 */
static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->video->w;
        int h = dstpic->video->h;
        int refs = yadif->cur->linesize[i];
        int absrefs = FFABS(refs);
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;

        if (i == 1 || i == 2) {
            /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        /* (Re)allocate the scratch buffer: the current line plus two lines on
         * each side, with 64 bytes of padding at both ends. */
        if (yadif->temp_line_size < absrefs) {
            av_free(yadif->temp_line);
            yadif->temp_line = av_mallocz(2 * 64 + 5 * absrefs);
            /* Fix: the allocation result was not checked before; on failure
             * the border path below would have offset from a NULL pointer.
             * Record size 0 so a later, possibly successful, reallocation is
             * retried, and skip the scratch-buffer path this time. */
            yadif->temp_line_size = yadif->temp_line ? absrefs : 0;
        }

        for (y = 0; y < h; y++) {
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y * refs];
                uint8_t *cur  = &yadif->cur ->data[i][y * refs];
                uint8_t *next = &yadif->next->data[i][y * refs];
                uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
                int mode  = y == 1 || y + 2 == h ? 2 : yadif->mode;
                /* Offsets to the next/previous same-field line, mirrored at
                 * the borders. */
                int prefs = y + 1 < h ?  refs : -refs;
                int mrefs = y         ? -refs :  refs;

                if ((y <= 1 || y + 2 >= h) && yadif->temp_line) {
                    /* Border line: copy the lines filter_line() will touch
                     * into the padded scratch buffer so it cannot overread. */
                    uint8_t *tmp = yadif->temp_line + 64 + 2 * absrefs;
                    if (mode < 2)
                        memcpy(tmp + 2 * mrefs, cur + 2 * mrefs, w * df);
                    memcpy(tmp + mrefs, cur + mrefs, w * df);
                    memcpy(tmp,         cur,         w * df);
                    if (prefs != mrefs) {
                        memcpy(tmp + prefs, cur + prefs, w * df);
                        if (mode < 2)
                            memcpy(tmp + 2 * prefs, cur + 2 * prefs, w * df);
                    }
                    cur = tmp;
                }

                yadif->filter_line(dst, prev, cur, next, w,
                                   prefs, mrefs, parity ^ tff, mode);
            } else {
                memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
                       &yadif->cur->data[i][y * refs], w * df);
            }
        }
    }
}
/**
 * Deinterlace one output frame.
 *
 * Lines of the requested parity are interpolated; the others are copied from
 * the current frame. When a SIMD filter_line with alignment requirements is
 * in use (yadif->req_align != 0), the aligned interior is processed by
 * filter_line() and the left/right borders by the scalar filter_edges().
 *
 * @param ctx    filter context holding the YADIFContext state
 * @param dstpic destination frame to fill
 * @param parity parity (0/1) of the field to interpolate
 * @param tff    non-zero if the top field is first
 */
static void filter(AVFilterContext *ctx, AVFrame *dstpic, int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    int y, i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;
        int refs = yadif->cur->linesize[i];
        int df = (yadif->csp->comp[i].depth_minus1 + 8) / 8;
        int l_edge, l_edge_pix;

        if (i == 1 || i == 2) {
            /* Why is this not part of the per-plane description thing? */
            w >>= yadif->csp->log2_chroma_w;
            h >>= yadif->csp->log2_chroma_h;
        }

        /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
         * we need to call the c variant which avoids this for border pixels */
        l_edge     = yadif->req_align;
        l_edge_pix = l_edge / df;

        for (y = 0; y < h; y++) {
            if ((y ^ parity) & 1) {
                uint8_t *prev = &yadif->prev->data[i][y * refs];
                uint8_t *cur  = &yadif->cur ->data[i][y * refs];
                uint8_t *next = &yadif->next->data[i][y * refs];
                uint8_t *dst  = &dstpic->data[i][y * dstpic->linesize[i]];
                int mode = y == 1 || y + 2 == h ? 2 : yadif->mode;
                if (yadif->req_align) {
                    /* Aligned interior, then the scalar edge handler. */
                    yadif->filter_line(dst + l_edge, prev + l_edge,
                                       cur + l_edge, next + l_edge,
                                       w - l_edge_pix - 3,
                                       y + 1 < h ? refs : -refs,
                                       y ? -refs : refs,
                                       parity ^ tff, mode);
                    yadif->filter_edges(dst, prev, cur, next, w,
                                        y + 1 < h ? refs : -refs,
                                        y ? -refs : refs,
                                        parity ^ tff, mode, l_edge_pix);
                } else {
                    /* Fix: this call previously passed "next + l_edge" while
                     * dst/prev/cur were unoffset. l_edge is always 0 in this
                     * branch (req_align == 0), so behavior is unchanged, but
                     * the asymmetric offset was a latent bug waiting for a
                     * nonzero l_edge. */
                    yadif->filter_line(dst, prev, cur, next, w,
                                       y + 1 < h ? refs : -refs,
                                       y ? -refs : refs,
                                       parity ^ tff, mode);
                }
            } else {
                memcpy(&dstpic->data[i][y * dstpic->linesize[i]],
                       &yadif->cur->data[i][y * refs], w * df);
            }
        }
    }
}
/**
 * Emit one deinterlaced frame on the output link.
 *
 * Determines the field order, (for the second field of a frame) allocates a
 * fresh output buffer with the current frame's properties, runs the filter
 * and pushes the result downstream. For the second field the output pts is
 * cur->pts + next->pts when both are known -- NOTE(review): this relies on
 * the output timebase set up elsewhere; AV_NOPTS_VALUE otherwise.
 *
 * @param ctx       filter context
 * @param is_second non-zero when emitting the second field of the frame
 * @return the result of ff_filter_frame(), or AVERROR(ENOMEM) on
 *         allocation failure
 */
static int return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];
    int ret;

    /* Field order: explicit parity option wins; otherwise follow the frame's
     * own flags, defaulting to top-field-first for progressive input. */
    int tff = yadif->parity == -1
              ? (yadif->cur->interlaced_frame ? yadif->cur->top_field_first : 1)
              : yadif->parity ^ 1;

    if (is_second) {
        yadif->out = ff_get_video_buffer(link, link->w, link->h);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_copy_props(yadif->out, yadif->cur);
        yadif->out->interlaced_frame = 0;
    }

    yadif->filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

        yadif->out->pts = (cur_pts != AV_NOPTS_VALUE && next_pts != AV_NOPTS_VALUE)
                          ? cur_pts + next_pts
                          : AV_NOPTS_VALUE;
    }

    ret = ff_filter_frame(link, yadif->out);

    /* In field mode (mode bit 0 set) the second field is still pending. */
    yadif->frame_pending = (yadif->mode & 1) && !is_second;
    return ret;
}