/**
 * FIFO output: deliver the next queued picture on @outlink.
 *
 * If the queue is empty, one frame is first requested from the input.
 * Ownership of the queued reference is handed to the next filter by
 * ff_start_frame(), so it must not be unreferenced here.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    BufPic *tmp;
    int ret;

    if (!fifo->root.next) {
        /* BUG FIX: the '< 0' was inside the assignment parentheses, so
         * ret received the boolean comparison result (0 or 1) instead of
         * the error code, and failures were reported as a positive value. */
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about dereferencing it ourselves. */
    ff_start_frame(outlink, fifo->root.next->picref);
    ff_draw_slice (outlink, 0, outlink->h, 1);
    ff_end_frame  (outlink);

    /* unlink and free the consumed queue node */
    if (fifo->last == fifo->root.next)
        fifo->last = &fifo->root;
    tmp = fifo->root.next->next;
    av_free(fifo->root.next);
    fifo->root.next = tmp;

    return 0;
}
/* Evaluate the user-supplied PTS expression for this frame and forward a
 * re-timestamped reference to the next filter. Also maintains the running
 * expression variables (frame counter, previous in/out PTS, ...). */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    SetPTSContext *s = inlink->dst->priv;
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
    double res;

    /* latch the very first timestamp as the stream start time */
    if (isnan(s->var_values[VAR_STARTPTS]))
        s->var_values[VAR_STARTPTS] = TS2D(inpicref->pts);

    s->var_values[VAR_PTS       ] = TS2D(inpicref->pts);
    s->var_values[VAR_INTERLACED] = inpicref->video->interlaced;
    s->var_values[VAR_POS       ] = inpicref->pos == -1 ? NAN : inpicref->pos;

    res = av_expr_eval(s->expr, s->var_values, NULL);
    outpicref->pts = D2TS(res);

#ifdef DEBUG
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%"PRId64" interlaced:%d pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
           (int64_t)s->var_values[VAR_N],
           (int)s->var_values[VAR_INTERLACED],
           inpicref ->pos,
           inpicref ->pts, inpicref ->pts * av_q2d(inlink->time_base),
           outpicref->pts, outpicref->pts * av_q2d(inlink->time_base));
#endif

    /* bookkeeping for the next evaluation */
    s->var_values[VAR_N] += 1.0;
    s->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts);
    s->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts);

    ff_start_frame(inlink->dst->outputs[0], outpicref);
}
/* Allocate the (rotated-geometry) output buffer, propagate the PTS and the
 * inverted sample aspect ratio, and open the frame on the output link. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    TransContext *trans = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *fwd;

    /* nothing to transpose: hand the frame straight through */
    if (trans->passthrough)
        return ff_null_start_frame(inlink, picref);

    outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                           outlink->w, outlink->h);
    if (!outlink->out_buf)
        return AVERROR(ENOMEM);

    outlink->out_buf->pts = picref->pts;

    if (picref->video->sample_aspect_ratio.num == 0) {
        outlink->out_buf->video->sample_aspect_ratio =
            picref->video->sample_aspect_ratio;
    } else {
        /* width/height are swapped, so the aspect ratio is inverted */
        outlink->out_buf->video->sample_aspect_ratio.num =
            picref->video->sample_aspect_ratio.den;
        outlink->out_buf->video->sample_aspect_ratio.den =
            picref->video->sample_aspect_ratio.num;
    }

    fwd = avfilter_ref_buffer(outlink->out_buf, ~0);
    if (!fwd)
        return AVERROR(ENOMEM);
    return ff_start_frame(outlink, fwd);
}
/* Forward a main-input frame, first trying to pull an overlay frame whose
 * PTS is not older than the current main frame. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    OverlayContext *over = ctx->priv;
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);

    if (!outpicref)
        return AVERROR(ENOMEM);

    /* convert main-input timestamps into the output time base */
    outpicref->pts = av_rescale_q(outpicref->pts, ctx->inputs[MAIN]->time_base,
                                  ctx->outputs[0]->time_base);

    if (!over->overpicref || over->overpicref->pts < outpicref->pts) {
        AVFilterBufferRef *prev = over->overpicref;

        over->overpicref = NULL;
        ff_request_frame(ctx->inputs[OVERLAY]);
        if (over->overpicref) {
            /* a fresher overlay frame arrived: drop the stale one */
            if (prev)
                avfilter_unref_buffer(prev);
        } else {
            /* nothing new came in: keep using the previous overlay */
            over->overpicref = prev;
        }
    }

    return ff_start_frame(ctx->outputs[0], outpicref);
}
/* Take two references on the incoming frame: one is forwarded to the next
 * filter, the other is kept in outlink->out_buf for later processing. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *own_ref, *fwd_ref;
    int ret;

    own_ref = avfilter_ref_buffer(inpicref, ~0);
    if (!own_ref)
        return AVERROR(ENOMEM);

    fwd_ref = avfilter_ref_buffer(own_ref, ~0);
    if (!fwd_ref) {
        avfilter_unref_bufferp(&own_ref);
        return AVERROR(ENOMEM);
    }

    ret = ff_start_frame(outlink, fwd_ref);
    if (ret < 0) {
        avfilter_unref_bufferp(&own_ref);
        return ret;
    }

    outlink->out_buf = own_ref;
    return 0;
}
/* Queue the finished input frame on the FIFO matching its input pad, then,
 * while a frame is available from BOTH inputs, merge one main/alpha pair
 * and emit the result on the output link. */
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AlphaMergeContext *merge = ctx->priv;

    /* which input pad did this frame come from? pad 1 carries the alpha */
    int is_alpha = (inlink == ctx->inputs[1]);
    struct FFBufQueue *queue =
        (is_alpha ? &merge->queue_alpha : &merge->queue_main);

    /* take ownership of the buffer away from the link before queueing it */
    ff_bufqueue_add(ctx, queue, inlink->cur_buf);
    inlink->cur_buf = NULL;

    while (1) {
        AVFilterBufferRef *main_buf, *alpha_buf;

        /* a pair is needed to produce one output frame */
        if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
            !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;

        main_buf = ff_bufqueue_get(&merge->queue_main);
        alpha_buf = ff_bufqueue_get(&merge->queue_alpha);

        /* main_buf doubles as the output frame; the next filter gets its
         * own reference while we keep ours in out_buf */
        ctx->outputs[0]->out_buf = main_buf;
        ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(main_buf, ~0));
        merge->frame_requested = 0;
        draw_frame(ctx, main_buf, alpha_buf);
        ff_end_frame(ctx->outputs[0]);
        /* the alpha frame is fully consumed at this point */
        avfilter_unref_buffer(alpha_buf);
    }
    return 0;
}
/* Stamp the configured aspect ratio onto the frame and pass it on. */
static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AspectContext *s = link->dst->priv;

    picref->video->pixel_aspect = s->aspect;
    ff_start_frame(link->dst->outputs[0], picref);
}
/* Satisfy an output frame request: flush a cached frame if one is pending,
 * otherwise keep pulling input frames until one passes the selection. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    select->select = 0;

    /* a frame cached by poll_frame takes priority */
    if (av_fifo_size(select->pending_frames)) {
        AVFilterBufferRef *picref;

        av_fifo_generic_read(select->pending_frames, &picref,
                             sizeof(picref), NULL);
        ff_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
        ff_draw_slice(outlink, 0, outlink->h, 1);
        ff_end_frame(outlink);
        avfilter_unref_buffer(picref);
        return 0;
    }

    /* pull until the select expression accepts a frame */
    while (!select->select) {
        int ret = ff_request_frame(inlink);
        if (ret < 0)
            return ret;
    }

    return 0;
}
/* Stamp the configured aspect ratio onto the frame, release our claim on
 * the link's buffer, and forward it downstream. */
static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AspectContext *s = link->dst->priv;

    picref->video->pixel_aspect = s->aspect;
    /* ownership of picref passes to the next filter */
    link->cur_buf = NULL;
    return ff_start_frame(link->dst->outputs[0], picref);
}
/* Forward a reference to the next filter while stashing the same reference
 * in the output link's out_buf. */
static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFilterBufferRef *ref = avfilter_ref_buffer(picref, ~0);

    outlink->out_buf = ref;
    ff_start_frame(outlink, ref);
}
/* Fan the incoming frame out to every output pad. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    int out;

    /* each output gets its own reference with write permission stripped,
     * so downstream filters cannot clobber the shared picture data */
    for (out = 0; out < ctx->nb_outputs; out++) {
        AVFilterBufferRef *ref = avfilter_ref_buffer(picref, ~AV_PERM_WRITE);
        ff_start_frame(ctx->outputs[out], ref);
    }
}
/* Keep a private reference to the outgoing frame (for the later slice/end
 * callbacks to draw into) and open the frame on the output. */
static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    ColorMatrixContext *color = link->dst->priv;
    AVFilterBufferRef *ref = avfilter_ref_buffer(picref, ~0);

    color->outpicref = ref;
    return ff_start_frame(link->dst->outputs[0], ref);
}
/**
 * Evaluate the user PTS expression for the incoming frame (video) or sample
 * buffer (audio), forward a re-timestamped reference downstream, and update
 * the running expression variables.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    SetPTSContext *setpts = inlink->dst->priv;
    double d;
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);

    if (!outpicref)
        return AVERROR(ENOMEM);

    /* latch stream start time/PTS from the first buffer seen */
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(inpicref->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(inpicref->pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS       ] = TS2D(inpicref->pts);
    setpts->var_values[VAR_T         ] = TS2T(inpicref->pts, inlink->time_base);
    setpts->var_values[VAR_POS       ] = inpicref->pos == -1 ? NAN : inpicref->pos;

    /* media-type specific variables */
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        setpts->var_values[VAR_INTERLACED] = inpicref->video->interlaced;
        break;
    case AVMEDIA_TYPE_AUDIO:
        setpts->var_values[VAR_NB_SAMPLES] = inpicref->audio->nb_samples;
        break;
    }

    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);
    outpicref->pts = D2TS(d);

    /* remember this buffer's in/out timestamps for the next evaluation */
    setpts->var_values[VAR_PREV_INPTS ] = TS2D(inpicref ->pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(inpicref ->pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(outpicref->pts);
    setpts->var_values[VAR_PREV_OUTT] = TS2T(outpicref->pts, inlink->time_base);

    av_dlog(inlink->dst,
            "n:%"PRId64" interlaced:%d nb_samples:%d nb_consumed_samples:%d "
            "pos:%"PRId64" pts:%"PRId64" t:%f -> pts:%"PRId64" t:%f\n",
            (int64_t)setpts->var_values[VAR_N],
            (int)setpts->var_values[VAR_INTERLACED],
            (int)setpts->var_values[VAR_NB_SAMPLES],
            (int)setpts->var_values[VAR_NB_CONSUMED_SAMPLES],
            (int64_t)setpts->var_values[VAR_POS],
            (int64_t)setpts->var_values[VAR_PREV_INPTS],
            setpts->var_values[VAR_PREV_INT],
            (int64_t)setpts->var_values[VAR_PREV_OUTPTS],
            setpts->var_values[VAR_PREV_OUTT]);

    setpts->var_values[VAR_N] += 1.0;
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += inpicref->audio->nb_samples;
        /* audio goes through the samples path, video through start_frame */
        return ff_filter_samples(inlink->dst->outputs[0], outpicref);
    } else
        return ff_start_frame  (inlink->dst->outputs[0], outpicref);
}
/* Start a frame downstream while retaining the same reference in the output
 * link's out_buf for the rest of the frame's processing. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *ref = avfilter_ref_buffer(inpicref, ~0);

    outlink->out_buf = ref;
    ff_start_frame(outlink, ref);
}
/**
 * Accumulate frames until the analysis window is full, then pick the frame
 * whose histogram is closest (least sum of squared errors) to the window
 * average and emit it as the thumbnail.
 *
 * @return 0 while still buffering, otherwise the result of ff_end_frame()
 */
static int end_frame(AVFilterLink *inlink)
{
    int i, j, best_frame_idx = 0;
    double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    ThumbContext *thumb = inlink->dst->priv;
    AVFilterContext *ctx = inlink->dst;
    AVFilterBufferRef *picref;

    // keep a reference of each frame
    thumb->frames[thumb->n].buf = inlink->cur_buf;
    inlink->cur_buf = NULL;

    // no selection until the buffer of N frames is filled up
    if (thumb->n < thumb->n_frames - 1) {
        thumb->n++;
        return 0;
    }

    // average histogram of the N frames
    for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) {
        for (i = 0; i < thumb->n_frames; i++)
            avg_hist[j] += (double)thumb->frames[i].histogram[j];
        avg_hist[j] /= thumb->n_frames;
    }

    // find the frame closer to the average using the sum of squared errors
    for (i = 0; i < thumb->n_frames; i++) {
        sq_err = frame_sum_square_err(thumb->frames[i].histogram, avg_hist);
        if (i == 0 || sq_err < min_sq_err)
            best_frame_idx = i, min_sq_err = sq_err;
    }

    // free and reset everything (except the best frame buffer)
    for (i = 0; i < thumb->n_frames; i++) {
        memset(thumb->frames[i].histogram, 0,
               sizeof(thumb->frames[i].histogram));
        if (i == best_frame_idx)
            continue;
        avfilter_unref_buffer(thumb->frames[i].buf);
        thumb->frames[i].buf = NULL;
    }
    thumb->n = 0;

    // raise the chosen one
    picref = thumb->frames[best_frame_idx].buf;
    av_log(ctx, AV_LOG_INFO, "frame id #%d (pts_time=%f) selected\n",
           best_frame_idx, picref->pts * av_q2d(inlink->time_base));
    // ownership of the winning buffer moves to the next filter here,
    // so clear our slot before the draw/end calls
    ff_start_frame(outlink, picref);
    thumb->frames[best_frame_idx].buf = NULL;
    ff_draw_slice(outlink, 0, inlink->h, 1);
    return ff_end_frame(outlink);
}
/**
 * Override the interlacing flags of the outgoing frame according to the
 * configured mode and forward it to the next filter.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    SetFieldContext *setfield = inlink->dst->priv;
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);

    /* BUG FIX: avfilter_ref_buffer() can fail; the result was previously
     * dereferenced without a check. */
    if (!outpicref)
        return AVERROR(ENOMEM);

    if (setfield->mode == MODE_PROG) {
        outpicref->video->interlaced = 0;
    } else if (setfield->mode != MODE_AUTO) {
        outpicref->video->interlaced = 1;
        /* the mode value itself is stored as the top_field_first flag */
        outpicref->video->top_field_first = setfield->mode;
    }
    return ff_start_frame(inlink->dst->outputs[0], outpicref);
}
/* Open a new output frame only when starting a fresh tile grid; frames in
 * the middle of a grid are absorbed into the current output buffer. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    TileContext *tile = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *out;

    /* grid already in progress: nothing to open */
    if (tile->current)
        return;

    out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    avfilter_copy_buffer_ref_props(out, picref);
    out->video->w = outlink->w;
    out->video->h = outlink->h;
    outlink->out_buf = out;
    ff_start_frame(outlink, out);
}
/* Fan the incoming frame out to every output pad, stopping on the first
 * allocation or delivery failure. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    int out, ret = 0;

    for (out = 0; out < ctx->nb_outputs; out++) {
        /* each output receives its own non-writable reference */
        AVFilterBufferRef *ref = avfilter_ref_buffer(picref, ~AV_PERM_WRITE);

        if (!ref)
            return AVERROR(ENOMEM);
        ret = ff_start_frame(ctx->outputs[out], ref);
        if (ret < 0)
            break;
    }
    return ret;
}
/* Rescale the frame's PTS into the output time base (when it differs from
 * the input's) and forward the frame. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];

    if (av_cmp_q(inlink->time_base, outlink->time_base)) {
        int64_t old_pts = picref->pts;

        picref->pts = av_rescale_q(old_pts, inlink->time_base,
                                   outlink->time_base);
        av_log(ctx, AV_LOG_DEBUG,
               "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
               inlink ->time_base.num, inlink ->time_base.den, old_pts,
               outlink->time_base.num, outlink->time_base.den, picref->pts);
    }
    /* ownership of picref passes to the next filter */
    inlink->cur_buf = NULL;

    return ff_start_frame(outlink, picref);
}
/* Forward the frame, rescaling its PTS into the output time base when the
 * two time bases differ (in which case a fresh reference carries the new
 * timestamp and the original is released). */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *out_ref = picref;

    if (av_cmp_q(inlink->time_base, outlink->time_base)) {
        out_ref = avfilter_ref_buffer(picref, ~0);
        out_ref->pts = av_rescale_q(picref->pts, inlink->time_base,
                                    outlink->time_base);
        av_log(ctx, AV_LOG_DEBUG,
               "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
               inlink ->time_base.num, inlink ->time_base.den, picref ->pts,
               outlink->time_base.num, outlink->time_base.den, out_ref->pts);
        avfilter_unref_buffer(picref);
    }

    ff_start_frame(outlink, out_ref);
}
/**
 * Produce one frame of solid color on @link.
 *
 * BUG FIX: both avfilter_get_video_buffer() and avfilter_ref_buffer() can
 * return NULL; the results were previously used unchecked (NULL deref).
 * This matches the error handling used by the other request_frame
 * implementations in this tree.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int color_request_frame(AVFilterLink *link)
{
    ColorContext *color = link->src->priv;
    AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE,
                                                          color->w, color->h);
    AVFilterBufferRef *buf_out;

    if (!picref)
        return AVERROR(ENOMEM);

    picref->video->sample_aspect_ratio = (AVRational) {1, 1};
    picref->pts = color->pts++;
    picref->pos = -1;

    /* the next filter gets its own reference; we keep ours to draw into */
    buf_out = avfilter_ref_buffer(picref, ~0);
    if (!buf_out) {
        avfilter_unref_buffer(picref);
        return AVERROR(ENOMEM);
    }

    ff_start_frame(link, buf_out);
    ff_fill_rectangle(&color->draw, &color->color,
                      picref->data, picref->linesize,
                      0, 0, color->w, color->h);
    ff_draw_slice(link, 0, color->h, 1);
    ff_end_frame(link);
    avfilter_unref_buffer(picref);
    return 0;
}
/**
 * Choose the slice height for this frame (optionally pseudo-random), align
 * it to the chroma subsampling, and pass the frame on.
 */
static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    SliceContext *slice = link->dst->priv;

    if (slice->use_random_h) {
        /* linear congruential generator driving the random slice height */
        slice->lcg_state = slice->lcg_state * 1664525 + 1013904223;
        slice->h = 8 + (uint64_t)slice->lcg_state * 25 / UINT32_MAX;
    }

    /* ensure that slices play nice with chroma subsampling, and enforce
     * a reasonable minimum size for the slices */
    /* BUG FIX: the previous mask '-1 << slice->vshift' left-shifts a
     * negative value, which is undefined behavior in C; this form computes
     * the identical alignment mask without UB. */
    slice->h = FFMAX(8, slice->h & ~((1 << slice->vshift) - 1));
    av_log(link->dst, AV_LOG_DEBUG, "h:%d\n", slice->h);
    ff_start_frame(link->dst->outputs[0], picref);
}
/**
 * Produce one frame from the frei0r source plugin.
 *
 * BUG FIX: both ff_get_video_buffer() and avfilter_ref_buffer() can return
 * NULL; the results were previously used unchecked (NULL deref). This
 * matches the error handling of the checked variant of this function.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                                    outlink->w, outlink->h);
    AVFilterBufferRef *buf_out;

    if (!picref)
        return AVERROR(ENOMEM);

    picref->video->pixel_aspect = (AVRational) {1, 1};
    picref->pts = frei0r->pts++;
    picref->pos = -1;

    /* forward one reference, keep ours for the plugin to render into */
    buf_out = avfilter_ref_buffer(picref, ~0);
    if (!buf_out) {
        avfilter_unref_buffer(picref);
        return AVERROR(ENOMEM);
    }

    ff_start_frame(outlink, buf_out);
    frei0r->update(frei0r->instance,
                   av_rescale_q(picref->pts, frei0r->time_base,
                                (AVRational){1,1000}),
                   NULL, (uint32_t *)picref->data[0]);
    ff_draw_slice(outlink, 0, outlink->h, 1);
    ff_end_frame(outlink);
    avfilter_unref_buffer(picref);
    return 0;
}
/**
 * Start a frame on the output. If the input must be preserved, a writable
 * copy of its properties is allocated; otherwise the input buffer is used
 * directly as the output buffer.
 *
 * BUG FIX: ff_get_video_buffer() and avfilter_ref_buffer() can return NULL;
 * their results were previously used unchecked.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *outpicref, *for_next_filter;

    if (inpicref->perms & AV_PERM_PRESERVE) {
        /* input may not be modified: work on a private output buffer */
        outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                        outlink->w, outlink->h);
        if (!outpicref)
            return AVERROR(ENOMEM);
        avfilter_copy_buffer_ref_props(outpicref, inpicref);
        outpicref->video->w = outlink->w;
        outpicref->video->h = outlink->h;
    } else
        outpicref = inpicref;

    for_next_filter = avfilter_ref_buffer(outpicref, ~0);
    if (!for_next_filter)
        return AVERROR(ENOMEM);

    outlink->out_buf = outpicref;
    return ff_start_frame(outlink, for_next_filter);
}
/* Deliver one buffer on a link, dispatching on the link's media type:
 * video is emulated as a start/whole-slice/end sequence, audio goes through
 * the samples path. */
int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
{
    int ret;

    FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1);
    ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        /* stop at the first failing stage and report its error */
        ret = ff_start_frame(link, frame);
        if (ret >= 0)
            ret = ff_draw_slice(link, 0, frame->video->h, 1);
        if (ret >= 0)
            ret = ff_end_frame(link);
        return ret;
    case AVMEDIA_TYPE_AUDIO:
        return ff_filter_samples(link, frame);
    default:
        return AVERROR(EINVAL);
    }
}
/**
 * Allocate and blank the output frame, copy the input's properties (and
 * palette, for paletted formats), then open it on the output link.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    PixdescTestContext *priv = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *outpicref, *for_next_filter;
    int i, ret = 0;

    outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                    outlink->w, outlink->h);
    if (!outpicref)
        return AVERROR(ENOMEM);

    avfilter_copy_buffer_ref_props(outpicref, picref);

    /* blank every plane, honoring negative linesizes (bottom-up layouts) */
    for (i = 0; i < 4; i++) {
        int h = outlink->h;
        h = i == 1 || i == 2 ? h>>priv->pix_desc->log2_chroma_h : h;

        if (outpicref->data[i]) {
            uint8_t *data = outpicref->data[i] +
                (outpicref->linesize[i] > 0 ? 0 : outpicref->linesize[i] * (h-1));
            memset(data, 0, FFABS(outpicref->linesize[i]) * h);
        }
    }

    /* copy palette */
    if (priv->pix_desc->flags & PIX_FMT_PAL ||
        priv->pix_desc->flags & PIX_FMT_PSEUDOPAL)
        /* BUG FIX: the palette was copied from the output onto itself (a
         * no-op), leaving the output palette uninitialized; it must come
         * from the input frame. */
        memcpy(outpicref->data[1], picref->data[1], 256*4);

    for_next_filter = avfilter_ref_buffer(outpicref, ~0);
    if (for_next_filter)
        ret = ff_start_frame(outlink, for_next_filter);
    else
        ret = AVERROR(ENOMEM);

    if (ret < 0) {
        avfilter_unref_bufferp(&outpicref);
        return ret;
    }

    outlink->out_buf = outpicref;
    return 0;
}
/* Evaluate the selection expression for this frame; selected frames are
 * either cached (when requested through poll_frame) or forwarded
 * immediately. Unselected frames are simply dropped. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = inlink->dst;
    SelectContext *select = ctx->priv;

    select->select = select_frame(ctx, picref);
    if (!select->select)
        return;

    /* frame was requested through poll_frame */
    if (select->cache_frames) {
        if (!av_fifo_space(select->pending_frames))
            av_log(ctx, AV_LOG_ERROR,
                   "Buffering limit reached, cannot cache more frames\n");
        else
            av_fifo_generic_write(select->pending_frames, &picref,
                                  sizeof(picref), NULL);
        return;
    }
    ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(picref, ~0));
}
/* Produce one frame of solid color on @link, with full error propagation.
 * The locally held reference is always released through the fail label. */
static int color_request_frame(AVFilterLink *link)
{
    ColorContext *color = link->src->priv;
    AVFilterBufferRef *frame, *frame_ref;
    int ret;

    frame = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
    if (!frame)
        return AVERROR(ENOMEM);

    frame->video->pixel_aspect = (AVRational) {1, 1};
    frame->pts = color->pts++;
    frame->pos = -1;

    /* the next filter gets its own reference; we keep ours to draw into */
    frame_ref = avfilter_ref_buffer(frame, ~0);
    if (!frame_ref) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = ff_start_frame(link, frame_ref);
    if (ret < 0)
        goto fail;

    ff_draw_rectangle(frame->data, frame->linesize,
                      color->line, color->line_step, color->hsub, color->vsub,
                      0, 0, color->w, color->h);
    ret = ff_draw_slice(link, 0, color->h, 1);
    if (ret < 0)
        goto fail;

    ret = ff_end_frame(link);
fail:
    avfilter_unref_buffer(frame);
    return ret;
}
/* Produce one frame from the frei0r source plugin, with full error
 * propagation; the local reference is released through the fail label. */
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *frame, *frame_ref;
    int ret;

    frame = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                outlink->w, outlink->h);
    if (!frame)
        return AVERROR(ENOMEM);

    frame->video->pixel_aspect = (AVRational) {1, 1};
    frame->pts = frei0r->pts++;
    frame->pos = -1;

    /* one reference goes downstream, ours stays for the plugin to fill */
    frame_ref = avfilter_ref_buffer(frame, ~0);
    if (!frame_ref) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = ff_start_frame(outlink, frame_ref);
    if (ret < 0)
        goto fail;

    frei0r->update(frei0r->instance,
                   av_rescale_q(frame->pts, frei0r->time_base,
                                (AVRational){1,1000}),
                   NULL, (uint32_t *)frame->data[0]);
    ret = ff_draw_slice(outlink, 0, outlink->h, 1);
    if (ret < 0)
        goto fail;

    ret = ff_end_frame(outlink);
fail:
    avfilter_unref_buffer(frame);
    return ret;
}
/**
 * FIFO output: hand the next queued buffer to the next filter, pulling one
 * frame from the input first if the queue is empty.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    int ret = 0;

    if (!fifo->root.next) {
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
        /* a successful request must have queued exactly one buffer */
        av_assert0(fifo->root.next);
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about dereferencing it ourselves. */
    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        /* the node is only popped once all three stages succeeded, so a
         * failed delivery leaves the buffer queued */
        if ((ret = ff_start_frame(outlink, fifo->root.next->buf)) < 0 ||
            (ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 ||
            (ret = ff_end_frame(outlink)) < 0)
            return ret;

        queue_pop(fifo);
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (outlink->request_samples) {
            /* a specific sample count was requested: let the audio helper
             * slice/pad the queued buffer as needed */
            return return_audio_frame(outlink->src);
        } else {
            ret = ff_filter_samples(outlink, fifo->root.next->buf);
            queue_pop(fifo);
        }
        break;
    default:
        return AVERROR(EINVAL);
    }

    return ret;
}