/**
 * Transpose the completed input picture into the output buffer and push
 * the result downstream as a single full-height slice.
 *
 * Rows of the input become columns of the output, for the luma plane and
 * for both (hsub/vsub-subsampled) chroma planes.
 */
static void end_frame(AVFilterLink *link)
{
    TransContext   *trans   = link->dst->priv;
    AVFilterLink   *outlink = link->dst->outputs[0];
    AVFilterPicRef *inpic   = link->cur_pic;
    AVFilterPicRef *outpic  = outlink->outpic;
    int x, y, p;

    /* luma plane: out[x][y] = in[y][x] */
    for (y = 0; y < inpic->h; y++)
        for (x = 0; x < inpic->w; x++)
            outpic->data[0][x * outpic->linesize[0] + y] =
                inpic->data[0][y * inpic->linesize[0] + x];

    /* chroma planes: same transposition on the subsampled grid */
    for (p = 1; p < 3; p++)
        for (y = 0; y < inpic->h >> trans->vsub; y++)
            for (x = 0; x < inpic->w >> trans->hsub; x++)
                outpic->data[p][x * outpic->linesize[p] + y] =
                    inpic->data[p][y * inpic->linesize[p] + x];

    avfilter_unref_pic(inpic);
    avfilter_draw_slice(outlink, 0, outpic->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_pic(outpic);
}
/**
 * Log per-frame information: frame count, timing, format, aspect ratio,
 * dimensions, interlacing, picture type, plus Adler-32 checksums of the
 * whole frame and of each plane, then forward end_frame downstream.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    uint32_t plane_crc[4] = { 0 }, crc = 0; /* zero so missing planes print as 0 */
    int plane;

    /* Fix: stop at the first missing plane instead of always iterating all
     * four. av_image_get_linesize() can return a negative value for a
     * nonexistent plane, and picref->data[plane] may be NULL -- both were
     * passed straight to av_adler32_update() by the original code. */
    for (plane = 0; plane < 4 && picref->data[plane]; plane++) {
        size_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
        plane_crc[plane] = av_adler32_update(0  , picref->data[plane], linesize);
        crc              = av_adler32_update(crc, picref->data[plane], linesize);
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%"PRId64" pts_time:%f pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "crc:%u plane_crc:[%u %u %u %u]\n",
           showinfo->frame,
           picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos,
           av_pix_fmt_descriptors[picref->format].name,
           picref->video->sample_aspect_ratio.num,
           picref->video->sample_aspect_ratio.den,
           picref->video->w, picref->video->h,
           !picref->video->interlaced     ? 'P' :      /* Progressive  */
           picref->video->top_field_first ? 'T' : 'B', /* Top / Bottom */
           picref->video->key_frame,
           av_get_picture_type_char(picref->video->pict_type),
           crc, plane_crc[0], plane_crc[1], plane_crc[2], plane_crc[3]);

    showinfo->frame++;
    avfilter_end_frame(inlink->dst->outputs[0]);
}
/**
 * Blur the logo area out of the finished frame and forward it.
 *
 * The luma plane is processed at full resolution with the full-size mask;
 * both chroma planes are processed at half resolution with the half-size
 * mask. When filtering in place (input buffer == output buffer) only the
 * input reference is released.
 */
static void end_frame(AVFilterLink *inlink)
{
    RemovelogoContext *removelogo = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *in  = inlink->cur_buf;
    AVFilterBufferRef *out = outlink->out_buf;
    int direct = in == out; /* in-place filtering? */
    int plane;

    for (plane = 0; plane < 3; plane++) {
        int chroma = plane > 0; /* planes 1 and 2 are subsampled by 2 */
        blur_image(removelogo->mask,
                   in->data[plane],  in->linesize[plane],
                   out->data[plane], out->linesize[plane],
                   chroma ? removelogo->half_mask_data : removelogo->full_mask_data,
                   chroma ? inlink->w/2 : inlink->w,
                   chroma ? inlink->w/2 : inlink->w,
                   chroma ? inlink->h/2 : inlink->h,
                   direct,
                   chroma ? &removelogo->half_mask_bbox : &removelogo->full_mask_bbox);
    }

    avfilter_draw_slice(outlink, 0, inlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(in);
    if (!direct)
        avfilter_unref_buffer(out);
}
/**
 * Produce one output frame for the select filter.
 *
 * A previously cached frame, if any, is sent immediately; otherwise frames
 * are requested from upstream until one passes the selection (which sets
 * select->select as a side effect of the input callbacks).
 *
 * @return 0 on success, a negative error code from the upstream request
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];

    select->select = 0;

    /* flush a pending (cached) frame first */
    if (av_fifo_size(select->pending_frames)) {
        AVFilterBufferRef *cached;

        av_fifo_generic_read(select->pending_frames, &cached, sizeof(cached), NULL);
        avfilter_start_frame(outlink, avfilter_ref_buffer(cached, ~0));
        avfilter_draw_slice(outlink, 0, outlink->h, 1);
        avfilter_end_frame(outlink);
        avfilter_unref_buffer(cached);
        return 0;
    }

    /* otherwise keep pulling from upstream until a frame is selected */
    for (;;) {
        int err;

        if (select->select)
            break;
        err = avfilter_request_frame(inlink);
        if (err < 0)
            return err;
    }
    return 0;
}
/**
 * Apply the box blur to the completed frame: a horizontal pass over every
 * existing plane, then a vertical pass in place on the output, and forward
 * the result as one full-height slice.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BoxBlurContext *boxblur = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpicref  = inlink->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    int plane;
    int cw = inlink->w >> boxblur->hsub, ch = inlink->h >> boxblur->vsub;
    int w[4] = { inlink->w, cw, cw, inlink->w };
    int h[4] = { inlink->h, ch, ch, inlink->h };

    /* Fix: test the index bound BEFORE dereferencing data[plane]; the
     * original evaluated inpicref->data[plane] first, reading one element
     * past the end of a 4-entry plane array once plane reached 4. */
    for (plane = 0; plane < 4 && inpicref->data[plane]; plane++)
        hblur(outpicref->data[plane], outpicref->linesize[plane],
              inpicref ->data[plane], inpicref ->linesize[plane],
              w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
              boxblur->temp);

    for (plane = 0; plane < 4 && inpicref->data[plane]; plane++)
        vblur(outpicref->data[plane], outpicref->linesize[plane],
              outpicref->data[plane], outpicref->linesize[plane],
              w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
              boxblur->temp);

    avfilter_draw_slice(outlink, 0, inlink->h, 1);
    avfilter_end_frame(outlink);
}
/**
 * Emit the frame currently buffered in the source context.
 *
 * Copies the stored frame into a freshly allocated video buffer, propagates
 * pts and interlacing flags, pushes it downstream as one slice, and clears
 * the has_frame flag.
 *
 * @return 0 on success, -1 if no frame is available
 */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *buf;

    if (!c->has_frame) {
        av_log(link->src, AV_LOG_ERROR,
               "request_frame() called with no available frame!\n");
        return -1;
    }

    buf = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_image_copy(buf->data, buf->linesize,
                  c->frame.data, c->frame.linesize,
                  buf->format, link->w, link->h);

    buf->pts                    = c->pts;
    buf->video->interlaced      = c->frame.interlaced_frame;
    buf->video->top_field_first = c->frame.top_field_first;

    avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(buf);

    c->has_frame = 0;
    return 0;
}
/**
 * Emit the frame currently buffered in the source context (legacy API
 * variant using av_picture_copy and flat picref fields).
 *
 * @return 0 (the "no frame" case only logs, see note below)
 */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *picref;

    if (!c->has_frame) {
        av_log(link->src, AV_LOG_ERROR,
               "request_frame() called with no available frame!\n");
        /* NOTE(review): the early return is commented out, so after logging
         * the error the function proceeds and copies whatever is currently
         * in c->frame. This looks like a deliberate workaround rather than
         * an oversight -- confirm before re-enabling the return. */
        //return -1;
    }

    /* This picture will be needed unmodified later for decoding the next
     * frame */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
                                       AV_PERM_REUSE2,
                                       link->w, link->h);

    /* NOTE(review): &picref->data is cast to AVPicture*; this assumes the
     * data/linesize arrays are laid out like an AVPicture -- confirm against
     * the AVFilterBufferRef definition in this tree. */
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)&c->frame,
                    picref->format, link->w, link->h);

    picref->pts             = c->pts;
    picref->pixel_aspect    = c->pixel_aspect;
    picref->interlaced      = c->frame.interlaced_frame;
    picref->top_field_first = c->frame.top_field_first;

    avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(picref);

    c->has_frame = 0;
    return 0;
}
/**
 * Compute the bounding box of non-black content in the finished frame,
 * log it (with matching crop/drawbox parameter suggestions) and forward
 * end_frame downstream.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BBoxContext *bbox = ctx->priv;
    AVFilterBufferRef *frame = inlink->cur_buf;
    FFBoundingBox box;
    int found, bw, bh;

    found = ff_calculate_bounding_box(&box,
                                      frame->data[0], frame->linesize[0],
                                      inlink->w, inlink->h, 16);
    bw = box.x2 - box.x1 + 1;
    bh = box.y2 - box.y1 + 1;

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s",
           bbox->frame, av_ts2str(frame->pts),
           av_ts2timestr(frame->pts, &inlink->time_base));

    if (found) {
        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
               " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
               box.x1, box.x2, box.y1, box.y2, bw, bh,
               bw, bh, box.x1, box.y1,   /* crop params    */
               box.x1, box.y1, bw, bh);  /* drawbox params */
    }
    av_log(ctx, AV_LOG_INFO, "\n");

    bbox->frame++;
    avfilter_end_frame(inlink->dst->outputs[0]);
}
/**
 * Pop one frame from the FIFO and push it downstream. If the FIFO is
 * empty, a frame is first requested from upstream.
 *
 * Ownership of the popped picref is handed to the next filter via
 * avfilter_start_frame(), so it is not dereferenced here.
 *
 * @return 0 on success, a negative error code otherwise
 */
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    BufPic *tmp;
    int ret;

    if (!fifo->root.next) {
        /* Fix: the original wrote (ret = avfilter_request_frame(...) < 0),
         * so '<' bound tighter than '=' and a failure returned 1 instead of
         * propagating the actual error code. */
        if ((ret = avfilter_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
        /* Robustness: if upstream reported success but queued nothing,
         * fail instead of dereferencing the NULL list head below. */
        if (!fifo->root.next)
            return -1;
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about dereferencing it ourselves. */
    avfilter_start_frame(outlink, fifo->root.next->picref);
    avfilter_draw_slice (outlink, 0, outlink->h, 1);
    avfilter_end_frame  (outlink);

    /* unlink and free the consumed FIFO node */
    if (fifo->last == fifo->root.next)
        fifo->last = &fifo->root;
    tmp = fifo->root.next->next;
    av_free(fifo->root.next);
    fifo->root.next = tmp;

    return 0;
}
/**
 * Fan the end-of-frame event out to every output pad, then release the
 * input reference.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    int out;

    for (out = 0; out < ctx->output_count; out++)
        avfilter_end_frame(ctx->outputs[out]);

    avfilter_unref_buffer(inlink->cur_buf);
}
/**
 * Complete the current frame for the select filter.
 *
 * A selected frame is forwarded downstream; while caching is active the
 * reference is kept alive (presumably it was queued elsewhere -- the early
 * return deliberately skips the unref). Non-selected frames are dropped.
 */
static void end_frame(AVFilterLink *inlink)
{
    SelectContext *select = inlink->dst->priv;
    AVFilterBufferRef *frame = inlink->cur_buf;

    if (select->select) {
        if (select->cache_frames)
            return; /* keep the reference while the frame sits in the cache */
        avfilter_end_frame(inlink->dst->outputs[0]);
    }
    avfilter_unref_buffer(frame);
}
/**
 * Push the MPlayer-provided input buffer downstream as one full frame.
 *
 * @return 0 on success, AVERROR(EINVAL) when no input buffer is pending
 */
static int mpsrc_request_frame(AVFilterLink *link)
{
    struct mpsrc_priv *c = link->src->priv;
    struct vf_instance *vf = c->vf;
    AVFilterBufferRef *buf = vf->priv->in_buf;

    if (!buf)
        return AVERROR(EINVAL);

    avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    vf->priv->in_buf = NULL; /* consumed */
    return 0;
}
/**
 * Run the loaded plugin's process() callback on the completed frame and
 * forward the result. A failing callback is logged but the frame is still
 * pushed downstream, matching the established behavior.
 */
static void end_frame(AVFilterLink *link)
{
    PluginContext *plugin = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFilterBufferRef *src = link->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;
    int rc;

    rc = plugin->process(dst->data, dst->linesize,
                         src->data, src->linesize,
                         link->w, link->h);
    if (rc < 0)
        av_log(link->dst, AV_LOG_ERROR, "function process failed\n");

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, link->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/**
 * Run super2xsai() over the completed frame and push the scaled result
 * downstream as one full-height slice.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *src = inlink->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;

    super2xsai(inlink->dst,
               src->data[0], src->linesize[0],
               dst->data[0], dst->linesize[0],
               inlink->w, inlink->h);

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/**
 * Feed the completed frame through the loaded frei0r plugin instance and
 * forward the filtered output.
 */
static void end_frame(AVFilterLink *inlink)
{
    Frei0rContext *frei0r = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *src = inlink->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;
    /* frei0r update() takes the frame time in milliseconds */
    double time_ms = src->pts * av_q2d(inlink->time_base) * 1000;

    frei0r->update(frei0r->instance, time_ms,
                   (const uint32_t *)src->data[0],
                   (uint32_t *)dst->data[0]);

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/**
 * Report frames whose black-pixel percentage (accumulated elsewhere into
 * blackframe->nblack) meets the configured threshold, then reset the
 * per-frame counter and forward end_frame.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BlackFrameContext *blackframe = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    int percent_black = blackframe->nblack * 100 / (inlink->w * inlink->h);

    if (percent_black >= blackframe->bamount)
        av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%f\n",
               blackframe->frame, percent_black, picref->pos,
               picref->pts == AV_NOPTS_VALUE ? -1 : (double)picref->pts / AV_TIME_BASE);

    blackframe->frame++;
    blackframe->nblack = 0;
    avfilter_end_frame(inlink->dst->outputs[0]);
}
/**
 * Generate one frame from the frei0r source plugin: allocate a buffer, let
 * the plugin render into it (time rescaled to milliseconds), and push it
 * downstream.
 *
 * @return 0
 */
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *buf =
        avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);

    buf->video->pixel_aspect = (AVRational) {1, 1};
    buf->pts = frei0r->pts++;
    buf->pos = -1;

    avfilter_start_frame(outlink, avfilter_ref_buffer(buf, ~0));
    frei0r->update(frei0r->instance,
                   av_rescale_q(buf->pts, frei0r->time_base, (AVRational){1,1000}),
                   NULL, (uint32_t *)buf->data[0]);
    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(buf);

    return 0;
}
/**
 * Apply the color-matrix conversion appropriate for the input pixel format
 * and forward the converted frame downstream.
 */
static void end_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    ColorMatrixContext *color = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *out = outlink->out_buf;
    AVFilterBufferRef *in  = link->cur_buf;

    switch (in->format) {
    case PIX_FMT_YUV422P:
        process_frame_yuv422p(color, out, in);
        break;
    case PIX_FMT_YUV420P:
        process_frame_yuv420p(color, out, in);
        break;
    default: /* packed UYVY422 path */
        process_frame_uyvy422(color, out, in);
        break;
    }

    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(in);
}
/**
 * Produce one frame of solid color: allocate a buffer, stamp timing and
 * aspect, fill it with the configured color and push it downstream.
 *
 * @return 0
 */
static int color_request_frame(AVFilterLink *link)
{
    ColorContext *color = link->src->priv;
    AVFilterBufferRef *frame =
        avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);

    frame->video->pixel_aspect = (AVRational) {1, 1};
    frame->pts = av_rescale_q(color->pts++, color->time_base, AV_TIME_BASE_Q);
    frame->pos = 0;

    avfilter_start_frame(link, avfilter_ref_buffer(frame, ~0));
    ff_draw_rectangle(frame->data, frame->linesize,
                      color->line, color->line_step, color->hsub, color->vsub,
                      0, 0, color->w, color->h);
    avfilter_draw_slice(link, 0, color->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(frame);

    return 0;
}
/**
 * Emit the single picref currently stored in the source context and clear
 * the slot.
 *
 * @return 0 on success, AVERROR(EINVAL) when no frame is available
 */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *frame = c->picref;

    if (!frame) {
        av_log(link->src, AV_LOG_WARNING,
               "request_frame() called with no available frame!\n");
        return AVERROR(EINVAL);
    }

    avfilter_start_frame(link, avfilter_ref_buffer(frame, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    avfilter_unref_buffer(frame);
    c->picref = NULL;
    return 0;
}
/**
 * Default end_frame callback: release the input reference, release the
 * output buffer of the first output link (if any) and propagate the
 * end-of-frame event to it.
 */
void avfilter_default_end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->output_count ? ctx->outputs[0] : NULL;

    avfilter_unref_buffer(inlink->cur_buf);
    inlink->cur_buf = NULL;

    if (!outlink)
        return; /* sink filter: nothing to propagate */

    if (outlink->out_buf) {
        avfilter_unref_buffer(outlink->out_buf);
        outlink->out_buf = NULL;
    }
    avfilter_end_frame(outlink);
}
/**
 * Default end_frame callback (legacy PicRef API): drop the input picture
 * reference, drop the pending output picture of the first output link (if
 * any) and forward the end-of-frame event to it.
 */
void avfilter_default_end_frame(AVFilterLink *link)
{
    AVFilterLink *outlink = link->dst->output_count ? link->dst->outputs[0] : NULL;

    avfilter_unref_pic(link->cur_pic);
    link->cur_pic = NULL;

    if (!outlink)
        return; /* sink filter: nothing downstream */

    if (outlink->outpic) {
        avfilter_unref_pic(outlink->outpic);
        outlink->outpic = NULL;
    }
    avfilter_end_frame(outlink);
}
/**
 * Run OpenCV cvSmooth() on the completed frame, using IplImage views over
 * the input and output buffers, and forward the smoothed frame.
 */
static void smooth_end_frame(AVFilterLink *inlink)
{
    SmoothContext *smooth = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *src = inlink->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;
    IplImage srcimg, dstimg;

    fill_iplimage_from_picref(&srcimg, src, inlink->format);
    fill_iplimage_from_picref(&dstimg, dst, inlink->format);
    cvSmooth(&srcimg, &dstimg, smooth->type,
             smooth->param1, smooth->param2, smooth->param3, smooth->param4);
    fill_picref_from_iplimage(dst, &dstimg, inlink->format);

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/**
 * Hand the completed frame to the configured OpenCV sub-filter via
 * IplImage views and forward the processed output downstream.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    OCVContext *ocv = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *src = inlink->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;
    IplImage srcimg, dstimg;

    fill_iplimage_from_picref(&srcimg, src, inlink->format);
    fill_iplimage_from_picref(&dstimg, dst, inlink->format);
    ocv->end_frame_filter(ctx, &srcimg, &dstimg);
    fill_picref_from_iplimage(dst, &dstimg, inlink->format);

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/**
 * Pop the next queued buffer from the source FIFO and push it downstream
 * as one full frame.
 *
 * @return 0 on success, AVERROR(EINVAL) when the FIFO is empty
 */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *frame;

    if (!av_fifo_size(c->fifo)) {
        av_log(link->src, AV_LOG_WARNING,
               "request_frame() called with no available frame!\n");
        return AVERROR(EINVAL);
    }
    av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);

    avfilter_start_frame(link, avfilter_ref_buffer(frame, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(frame);

    return 0;
}
/**
 * Report frames whose black-pixel percentage meets the configured
 * threshold, tracking the most recent keyframe for the log output, then
 * reset the per-frame counter and forward end_frame.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BlackFrameContext *blackframe = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    int pblack;

    if (picref->video->key_frame)
        blackframe->last_keyframe = blackframe->frame;

    pblack = blackframe->nblack * 100 / (inlink->w * inlink->h);
    if (pblack >= blackframe->bamount)
        av_log(ctx, AV_LOG_INFO,
               "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f "
               "type:%c last_keyframe:%d\n",
               blackframe->frame, pblack, picref->pos, picref->pts,
               picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base),
               av_get_picture_type_char(picref->video->pict_type),
               blackframe->last_keyframe);

    blackframe->frame++;
    blackframe->nblack = 0;
    avfilter_end_frame(inlink->dst->outputs[0]);
}
/**
 * Emit one frame at the current target timestamp for the fps filter.
 *
 * If no frame was pre-fetched via poll_frame (has_frame unset), frames are
 * pulled from upstream until the cached picture's pts reaches the target
 * pts; the target is then advanced by one frame duration.
 *
 * @return 0 on success, -1 at end of stream or on upstream failure
 */
static int request_frame(AVFilterLink *link)
{
    FPSContext *fps = link->src->priv;

    if (fps->videoend)
        return -1;

    /* NOTE(review): the loop relies on the input filter's callbacks
     * replacing fps->pic as a side effect of avfilter_request_frame();
     * any non-zero return from upstream is treated as fatal. */
    if (!fps->has_frame) // support for filtering without poll_frame usage
        while (!fps->pic || fps->pic->pts < fps->pts)
            if (avfilter_request_frame(link->src->inputs[0]))
                return -1;

    fps->has_frame = 0;

    /* forward a read-only reference; the cached picture itself is released
     * right after so the next pull starts clean */
    avfilter_start_frame(link, avfilter_ref_pic(fps->pic, ~AV_PERM_WRITE));
    avfilter_draw_slice (link, 0, fps->pic->h, 1);
    avfilter_end_frame  (link);

    avfilter_unref_pic(fps->pic);
    fps->pic = NULL;

    /* advance the target timestamp by one output frame interval */
    fps->pts += fps->timebase;

    return 0;
}
/**
 * Shrink the running crop-area estimate against the completed frame and
 * log the suggested crop parameters.
 *
 * The four scan loops move each edge of the bounding box inward until a
 * line/column brighter than the configured limit is found; the box can
 * only grow tighter between resets.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CropDetectContext *cd = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    int bpp = cd->max_pixsteps[0];
    int w, h, x, y, shrink_by;

    // ignore first 2 frames - they may be empty
    /* NOTE(review): skipping 2 frames with "> 0" only works if frame_nb is
     * initialized to -2 in init(), which is outside this view -- confirm. */
    if (++cd->frame_nb > 0) {
        // Reset the crop area every reset_count frames, if reset_count is > 0
        if (cd->reset_count > 0 && cd->frame_nb > cd->reset_count) {
            cd->x1 = picref->video->w-1;
            cd->y1 = picref->video->h-1;
            cd->x2 = 0;
            cd->y2 = 0;
            cd->frame_nb = 1;
        }

        /* scan rows from the top for the first non-black line */
        for (y = 0; y < cd->y1; y++) {
            if (checkline(ctx, picref->data[0] + picref->linesize[0] * y, bpp, picref->video->w, bpp) > cd->limit) {
                cd->y1 = y;
                break;
            }
        }

        /* scan rows from the bottom */
        for (y = picref->video->h-1; y > cd->y2; y--) {
            if (checkline(ctx, picref->data[0] + picref->linesize[0] * y, bpp, picref->video->w, bpp) > cd->limit) {
                cd->y2 = y;
                break;
            }
        }

        /* scan columns from the left (y is reused as the column index;
         * the stride/step arguments are swapped to walk vertically) */
        for (y = 0; y < cd->x1; y++) {
            if (checkline(ctx, picref->data[0] + bpp*y, picref->linesize[0], picref->video->h, bpp) > cd->limit) {
                cd->x1 = y;
                break;
            }
        }

        /* scan columns from the right */
        for (y = picref->video->w-1; y > cd->x2; y--) {
            if (checkline(ctx, picref->data[0] + bpp*y, picref->linesize[0], picref->video->h, bpp) > cd->limit) {
                cd->x2 = y;
                break;
            }
        }

        // round x and y (up), important for yuv colorspaces
        // make sure they stay rounded!
        x = (cd->x1+1) & ~1;
        y = (cd->y1+1) & ~1;

        w = cd->x2 - x + 1;
        h = cd->y2 - y + 1;

        // w and h must be divisible by 2 as well because of yuv
        // colorspace problems.
        if (cd->round <= 1)
            cd->round = 16;
        if (cd->round % 2)
            cd->round *= 2;

        /* shrink width/height to a multiple of 'round', recentering by
         * half the removed amount (kept even for chroma alignment) */
        shrink_by = w % cd->round;
        w -= shrink_by;
        x += (shrink_by/2 + 1) & ~1;

        shrink_by = h % cd->round;
        h -= shrink_by;
        y += (shrink_by/2 + 1) & ~1;

        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pos:%"PRId64" pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
               cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y,
               picref->pos, picref->pts,
               picref->pts == AV_NOPTS_VALUE ? -1 : picref->pts * av_q2d(inlink->time_base),
               w, h, x, y);
    }

    avfilter_end_frame(inlink->dst->outputs[0]);
}
/**
 * Convert the field order of an interlaced frame by shifting every plane
 * one line up (to make it top-field-first) or one line down (bottom-field-
 * first), then forward the frame.
 *
 * Frames that are progressive, or already in the requested order, are
 * passed through untouched.
 */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FieldOrderContext *fieldorder = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *inpicref = inlink->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    int h, plane, line_step, line_size, line;
    uint8_t *cpy_src, *cpy_dst;

    if ( inpicref->video->interlaced
         && inpicref->video->top_field_first != fieldorder->dst_tff) {
        av_dlog(ctx, "picture will move %s one line\n",
                fieldorder->dst_tff ? "up" : "down");
        h = inpicref->video->h;
        for (plane = 0; plane < 4 && inpicref->data[plane]; plane++) {
            line_step = inpicref->linesize[plane];
            line_size = fieldorder->line_size[plane];
            cpy_src = inpicref->data[plane];
            cpy_dst = outpicref->data[plane];
            if (fieldorder->dst_tff) {
                /** Move every line up one line, working from
                 * the top to the bottom of the frame.
                 * The original top line is lost.
                 * The new last line is created as a copy of the
                 * penultimate line from that field. */
                for (line = 0; line < h; line++) {
                    if (1 + line < outpicref->video->h) {
                        memcpy(cpy_dst, cpy_src + line_step, line_size);
                    } else {
                        memcpy(cpy_dst, cpy_src - line_step - line_step, line_size);
                    }
                    cpy_src += line_step;
                    cpy_dst += line_step;
                }
            } else {
                /** Move every line down one line, working from
                 * the bottom to the top of the frame.
                 * The original bottom line is lost.
                 * The new first line is created as a copy of the
                 * second line from that field. */
                cpy_src += (h - 1) * line_step;
                cpy_dst += (h - 1) * line_step;
                for (line = h - 1; line >= 0 ; line--) {
                    if (line > 0) {
                        memcpy(cpy_dst, cpy_src - line_step, line_size);
                    } else {
                        memcpy(cpy_dst, cpy_src + line_step + line_step, line_size);
                    }
                    cpy_src -= line_step;
                    cpy_dst -= line_step;
                }
            }
        }
        outpicref->video->top_field_first = fieldorder->dst_tff;
        avfilter_draw_slice(outlink, 0, h, 1);
    } else {
        /* NOTE(review): no draw_slice is emitted on this pass-through
         * path -- presumably the slices were forwarded unchanged by the
         * filter's draw_slice callback; confirm against the rest of the
         * filter before relying on it. */
        av_dlog(ctx, "not interlaced or field order already correct\n");
    }

    avfilter_end_frame(outlink);
    avfilter_unref_buffer(inpicref);
}
/**
 * Pass-through end_frame callback: forward the event straight to the
 * destination filter's first output link.
 */
void avfilter_null_end_frame(AVFilterLink *link)
{
    AVFilterLink *next = link->dst->outputs[0];

    avfilter_end_frame(next);
}