/* end_frame() callback for the removelogo filter (legacy AVFilterBufferRef
 * API).  Blurs the masked logo area out of each of the three planes of the
 * completed input frame, then forwards the result downstream.
 * The two chroma planes use the half-resolution mask with w/2 x h/2
 * dimensions; NOTE(review): the integer halving assumes even frame
 * dimensions -- presumably guaranteed by the filter's format negotiation,
 * confirm against query_formats(). */
static void end_frame(AVFilterLink *inlink)
{
    RemovelogoContext *removelogo = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpicref  = inlink ->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    /* operate in place when input and output share the same buffer */
    int direct = inpicref == outpicref;

    /* luma plane: full-resolution mask; inlink->w is passed twice, as the
     * mask linesize and as the plane width */
    blur_image(removelogo->mask,
               inpicref ->data[0], inpicref ->linesize[0],
               outpicref->data[0], outpicref->linesize[0],
               removelogo->full_mask_data, inlink->w,
               inlink->w, inlink->h, direct, &removelogo->full_mask_bbox);
    /* chroma planes: half-resolution mask and half-size dimensions */
    blur_image(removelogo->mask,
               inpicref ->data[1], inpicref ->linesize[1],
               outpicref->data[1], outpicref->linesize[1],
               removelogo->half_mask_data, inlink->w/2,
               inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
    blur_image(removelogo->mask,
               inpicref ->data[2], inpicref ->linesize[2],
               outpicref->data[2], outpicref->linesize[2],
               removelogo->half_mask_data, inlink->w/2,
               inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);

    avfilter_draw_slice(outlink, 0, inlink->h, 1);
    avfilter_end_frame(outlink);
    /* release our input reference; the output is released only if it is a
     * distinct buffer (in the direct case both names alias one buffer) */
    avfilter_unref_buffer(inpicref);
    if (!direct)
        avfilter_unref_buffer(outpicref);
}
/* Tear down the aconvert filter: drop the two internally cached sample
 * buffers and, if one was created, free the audio-convert context. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AConvertContext *priv = ctx->priv;

    avfilter_unref_buffer(priv->mix_samplesref);
    avfilter_unref_buffer(priv->out_samplesref);
    if (priv->audioconvert_ctx)
        av_audio_convert_free(priv->audioconvert_ctx);
}
/* Deliver an audio frame across `link` to the destination pad's
 * filter_samples() callback.
 * Ownership of samplesref transfers to this function: it is either handed
 * on to the callback, replaced by a copy (and released), or released on
 * error.
 * Returns the callback's return value, AVERROR_EOF if the link is closed,
 * or AVERROR(ENOMEM) if a required copy cannot be allocated. */
int ff_filter_samples_framed(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    int (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
    AVFilterPad *src = link->srcpad;
    AVFilterPad *dst = link->dstpad;
    int64_t pts;
    AVFilterBufferRef *buf_out;
    int ret;

    FF_TPRINTF_START(NULL, filter_samples); ff_tlog_link(NULL, link, 1);

    /* a closed link accepts no further data */
    if (link->closed) {
        avfilter_unref_buffer(samplesref);
        return AVERROR_EOF;
    }

    if (!(filter_samples = dst->filter_samples))
        filter_samples = default_filter_samples;

    /* the source pad must have granted at least its declared minimum
     * permissions; strip any permissions it rejects */
    av_assert1((samplesref->perms & src->min_perms) == src->min_perms);
    samplesref->perms &= ~ src->rej_perms;

    /* prepare to copy the samples if the buffer has insufficient permissions */
    if ((dst->min_perms & samplesref->perms) != dst->min_perms ||
        dst->rej_perms & samplesref->perms) {
        av_log(link->dst, AV_LOG_DEBUG,
               "Copying audio data in avfilter (have perms %x, need %x, reject %x)\n",
               samplesref->perms, link->dstpad->min_perms, link->dstpad->rej_perms);

        buf_out = ff_default_get_audio_buffer(link, dst->min_perms,
                                              samplesref->audio->nb_samples);
        if (!buf_out) {
            avfilter_unref_buffer(samplesref);
            return AVERROR(ENOMEM);
        }
        buf_out->pts                = samplesref->pts;
        buf_out->audio->sample_rate = samplesref->audio->sample_rate;

        /* Copy actual data into new samples buffer */
        av_samples_copy(buf_out->extended_data, samplesref->extended_data,
                        0, 0, samplesref->audio->nb_samples,
                        av_get_channel_layout_nb_channels(link->channel_layout),
                        link->format);

        avfilter_unref_buffer(samplesref);
    } else
        buf_out = samplesref;

    link->cur_buf = buf_out;
    /* sample pts before the callback -- presumably the callee may consume
     * the reference, leaving buf_out invalid afterwards */
    pts = buf_out->pts;
    ret = filter_samples(link, buf_out);
    ff_update_link_current_pts(link, pts);
    return ret;
}
/* end_frame() for the frei0r wrapper: run the loaded frei0r instance's
 * update() on the completed input frame (timestamp converted to
 * milliseconds) and forward the filtered picture downstream. */
static void end_frame(AVFilterLink *inlink)
{
    Frei0rContext *frei0r = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *src = inlink->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;
    double time_ms = src->pts * av_q2d(inlink->time_base) * 1000;

    frei0r->update(frei0r->instance, time_ms,
                   (const uint32_t *)src->data[0],
                   (uint32_t *)dst->data[0]);

    avfilter_unref_buffer(src);
    ff_draw_slice(outlink, 0, outlink->h, 1);
    ff_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/* end_frame() for the Super2xSaI filter: scale the finished input frame
 * into the output buffer and emit it downstream. */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *src = inlink->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;

    super2xsai(inlink->dst,
               src->data[0], src->linesize[0],
               dst->data[0], dst->linesize[0],
               inlink->w, inlink->h);

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/* end_frame() for the plugin wrapper: invoke the plugin's process()
 * callback on the completed input frame and forward the output picture.
 * A failing process() is only logged; the output frame is still sent,
 * matching the established behaviour of this filter. */
static void end_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    PluginContext *plugin = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *src = link->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;
    int rc;

    rc = plugin->process(dst->data, dst->linesize,
                         src->data, src->linesize,
                         link->w, link->h);
    if (rc < 0)
        av_log(ctx, AV_LOG_ERROR, "function process failed\n");

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, link->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/* request_frame() for the buffer source: wrap the cached frame in a newly
 * allocated video buffer, push it through the link (start/slice/end), and
 * consume c->has_frame.  Returns 0 on success, -1 when no frame is
 * available.
 * FIX: the no-frame error path logged the problem but had its `return -1`
 * commented out, so the function fell through and copied from a stale
 * c->frame.  The return is restored, matching both the log message and the
 * sibling buffer-source implementation. */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *picref;

    if (!c->has_frame) {
        av_log(link->src, AV_LOG_ERROR,
               "request_frame() called with no available frame!\n");
        return -1;
    }

    /* This picture will be needed unmodified later for decoding the next
     * frame */
    picref = avfilter_get_video_buffer(link,
                                       AV_PERM_WRITE | AV_PERM_PRESERVE |
                                       AV_PERM_REUSE2,
                                       link->w, link->h);

    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)&c->frame,
                    picref->format, link->w, link->h);

    picref->pts             = c->pts;
    picref->pixel_aspect    = c->pixel_aspect;
    picref->interlaced      = c->frame.interlaced_frame;
    picref->top_field_first = c->frame.top_field_first;

    avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(picref);
    c->has_frame = 0;

    return 0;
}
/* Compute a scene-change score in [0,1] for picref against the previously
 * cached frame, using summed 8x8-block SADs on the first plane ("mean
 * absolute frame difference" heuristic), and rotate the cached reference.
 * Returns 0 when there is no comparable previous frame.
 * FIX: when the frame geometry changed (dims/linesize mismatch), the old
 * cached reference was overwritten without being released, leaking one
 * buffer per geometry change.  The unref is now unconditional;
 * avfilter_unref_buffer(NULL) is a no-op. */
static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
{
    double ret = 0;
    SelectContext *select = ctx->priv;
    AVFilterBufferRef *prev_picref = select->prev_picref;

    if (prev_picref &&
        picref->video->h == prev_picref->video->h &&
        picref->video->w == prev_picref->video->w &&
        picref->linesize[0] == prev_picref->linesize[0]) {
        int x, y;
        int64_t sad;
        double mafd, diff;
        uint8_t *p1 = picref->data[0];
        uint8_t *p2 = prev_picref->data[0];
        const int linesize = picref->linesize[0];

        /* accumulate 8x8 SADs over the whole plane */
        for (sad = y = 0; y < picref->video->h; y += 8)
            for (x = 0; x < linesize; x += 8)
                sad += select->c.sad[1](select,
                                        p1 + y * linesize + x,
                                        p2 + y * linesize + x,
                                        linesize, 8);
        emms_c();
        mafd = sad / (picref->video->h * picref->video->w * 3);
        diff = fabs(mafd - select->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
        select->prev_mafd = mafd;
    }
    /* FIX: release the old reference in all cases, not only when the
     * dimensions matched */
    avfilter_unref_buffer(prev_picref);
    select->prev_picref = avfilter_ref_buffer(picref, ~0);
    return ret;
}
/* start_frame() for the overlay filter's MAIN input: reference the incoming
 * picture for the output, rescale its pts to the output timebase, and make
 * sure the cached overlay frame is up to date before the frame is started
 * downstream.  Returns 0 or a negative AVERROR code. */
static int start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
    AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
    AVFilterContext *ctx = inlink->dst;
    OverlayContext *over = ctx->priv;

    if (!outpicref)
        return AVERROR(ENOMEM);

    /* convert the main-input pts into the output link's timebase */
    outpicref->pts = av_rescale_q(outpicref->pts, ctx->inputs[MAIN]->time_base,
                                  ctx->outputs[0]->time_base);

    /* if the cached overlay frame is missing or older than this main frame,
     * try to pull a newer one from the OVERLAY input */
    if (!over->overpicref || over->overpicref->pts < outpicref->pts) {
        AVFilterBufferRef *old = over->overpicref;
        over->overpicref = NULL;
        /* NOTE(review): return value deliberately ignored; the request is
         * expected to refill over->overpicref as a side effect -- checked
         * right below.  If nothing arrived (e.g. overlay EOF), the previous
         * overlay frame is kept via the else branch. */
        ff_request_frame(ctx->inputs[OVERLAY]);
        if (over->overpicref) {
            if (old)
                avfilter_unref_buffer(old);
        } else
            over->overpicref = old;
    }

    return ff_start_frame(inlink->dst->outputs[0], outpicref);
}
/* request_frame() for the resample filter's output link.  Forwards the
 * request upstream; on upstream EOF it drains whatever samples are still
 * buffered inside libavresample and emits them as one final frame.
 * Returns 0 on success or a negative AVERROR code (including AVERROR_EOF
 * once the delay buffer is fully flushed).
 * FIX: the return value of ff_filter_samples() was discarded and 0 was
 * returned unconditionally; downstream errors are now propagated. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext *s = ctx->priv;
    int ret = avfilter_request_frame(ctx->inputs[0]);

    /* flush the lavr delay buffer */
    if (ret == AVERROR_EOF && s->avr) {
        AVFilterBufferRef *buf;
        /* worst-case output sample count for the buffered delay */
        int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
                                        outlink->sample_rate,
                                        ctx->inputs[0]->sample_rate,
                                        AV_ROUND_UP);
        if (!nb_samples)
            return ret;

        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        if (!buf)
            return AVERROR(ENOMEM);

        ret = avresample_convert(s->avr, (void**)buf->extended_data,
                                 buf->linesize[0], nb_samples,
                                 NULL, 0, 0);
        if (ret <= 0) {
            avfilter_unref_buffer(buf);
            return (ret == 0) ? AVERROR_EOF : ret;
        }

        buf->pts = s->next_pts;
        return ff_filter_samples(outlink, buf);
    }
    return ret;
}
/* filter_frame() for alphamerge: queue the incoming buffer on whichever
 * input it arrived from (inputs[1] is the alpha input), then merge and emit
 * main+alpha pairs for as long as both queues hold a frame.  The main
 * buffer is forwarded downstream; the alpha buffer is consumed.
 * FIX: errors returned by ff_filter_frame() were silently discarded; they
 * are now returned to the caller (after releasing the alpha buffer). */
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AlphaMergeContext *merge = ctx->priv;
    int is_alpha = (inlink == ctx->inputs[1]);
    struct FFBufQueue *queue =
        (is_alpha ? &merge->queue_alpha : &merge->queue_main);

    ff_bufqueue_add(ctx, queue, buf);

    while (1) {
        AVFilterBufferRef *main_buf, *alpha_buf;
        int ret;

        if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
            !ff_bufqueue_peek(&merge->queue_alpha, 0))
            break;

        main_buf  = ff_bufqueue_get(&merge->queue_main);
        alpha_buf = ff_bufqueue_get(&merge->queue_alpha);

        merge->frame_requested = 0;
        draw_frame(ctx, main_buf, alpha_buf);
        ret = ff_filter_frame(ctx->outputs[0], main_buf);
        avfilter_unref_buffer(alpha_buf);
        if (ret < 0)
            return ret;
    }
    return 0;
}
/* filter_samples() for the earwax filter: run the crossfeed FIR over the
 * incoming int16 samples into a fresh output buffer, carrying NUMTAPS
 * samples of state across calls in the context's `taps` array.
 * Returns the downstream ff_filter_samples() result or a negative AVERROR.
 * FIX: ff_get_audio_buffer() can return NULL on allocation failure; the
 * result was previously dereferenced unchecked. */
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int16_t *taps, *endin, *in, *out;
    AVFilterBufferRef *outsamples =
        ff_get_audio_buffer(inlink, AV_PERM_WRITE, insamples->audio->nb_samples);
    int ret;

    if (!outsamples) { /* FIX: avoid NULL dereference on OOM */
        avfilter_unref_buffer(insamples);
        return AVERROR(ENOMEM);
    }

    avfilter_copy_buffer_ref_props(outsamples, insamples);

    taps = ((EarwaxContext *)inlink->dst->priv)->taps;
    out  = (int16_t *)outsamples->data[0];
    in   = (int16_t *)insamples ->data[0];

    // copy part of new input and process with saved input
    memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps));
    out = scalarproduct(taps, taps + NUMTAPS, out);

    // process current input; the *2 accounts for two interleaved channels
    // (presumably stereo -- confirm against query_formats)
    endin = in + insamples->audio->nb_samples * 2 - NUMTAPS;
    out = scalarproduct(in, endin, out);

    // save part of input for next round
    memcpy(taps, endin, NUMTAPS * sizeof(*taps));

    ret = ff_filter_samples(outlink, outsamples);
    avfilter_unref_buffer(insamples);
    return ret;
}
/* end_frame() for the bbox filter: compute the bounding box of plane-0
 * values above threshold 16, log it at INFO level together with ready-made
 * crop/drawbox parameter lists, then pass the frame on untouched. */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BBoxContext *bbox = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    FFBoundingBox box;
    int bw, bh;
    int has_bbox = ff_calculate_bounding_box(&box,
                                             picref->data[0], picref->linesize[0],
                                             inlink->w, inlink->h, 16);

    bw = box.x2 - box.x1 + 1;
    bh = box.y2 - box.y1 + 1;

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s",
           bbox->frame, av_ts2str(picref->pts),
           av_ts2timestr(picref->pts, &inlink->time_base));

    if (has_bbox) {
        av_log(ctx, AV_LOG_INFO,
               " x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
               " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
               box.x1, box.x2, box.y1, box.y2, bw, bh,
               bw, bh, box.x1, box.y1,   /* crop params */
               box.x1, box.y1, bw, bh);  /* drawbox params */
    }
    av_log(ctx, AV_LOG_INFO, "\n");

    bbox->frame++;
    avfilter_unref_buffer(picref);
    ff_end_frame(ctx->outputs[0]);
}
/* Tear down the overlay filter: release the cached overlay frame.
 * FIX: the NULL guard was redundant (avfilter_unref_buffer() is NULL-safe)
 * and the pointer was left dangling; it is now cleared after release,
 * consistent with the other uninit() handlers in this tree. */
static av_cold void uninit(AVFilterContext *ctx)
{
    OverlayContext *over = ctx->priv;

    avfilter_unref_buffer(over->overpicref);
    over->overpicref = NULL;
}
/* request_frame() for the select filter's output link.
 * First drain one frame from the pending-frames FIFO (frames that were
 * selected while downstream was not requesting); otherwise keep requesting
 * input frames until the select expression marks one as selected.
 * NOTE(review): if upstream kept returning 0 without ever setting
 * select->select, this loop would not terminate -- presumably upstream
 * eventually delivers a selected frame or an error; confirm. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    select->select = 0;

    if (av_fifo_size(select->pending_frames)) {
        AVFilterBufferRef *picref;

        av_fifo_generic_read(select->pending_frames, &picref,
                             sizeof(picref), NULL);
        /* replay the buffered frame through the normal start/slice/end
         * sequence, then drop the FIFO's reference */
        avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
        avfilter_draw_slice(outlink, 0, outlink->h, 1);
        avfilter_end_frame(outlink);
        avfilter_unref_buffer(picref);
        return 0;
    }

    while (!select->select) {
        int ret = avfilter_request_frame(inlink);
        if (ret < 0)
            return ret;
    }

    return 0;
}
/* Release the buffer reference pointed to by *ref and set *ref to NULL
 * (safe-unref idiom that prevents dangling pointers / double unref).
 * The deprecation guards silence compiler warnings because
 * avfilter_unref_buffer() is itself deprecated.
 * NOTE(review): ref itself must be non-NULL; *ref may presumably be NULL
 * (unref appears NULL-tolerant elsewhere in this file) -- confirm against
 * the public header's contract. */
void avfilter_unref_bufferp(AVFilterBufferRef **ref)
{
FF_DISABLE_DEPRECATION_WARNINGS
    avfilter_unref_buffer(*ref);
FF_ENABLE_DEPRECATION_WARNINGS
    *ref = NULL;
}
/* request_frame() for the buffer source (av_image_copy variant): copy the
 * cached frame into a freshly allocated video buffer and push it through
 * the link, consuming c->has_frame.  Returns 0 on success, -1 on failure.
 * FIX: avfilter_get_video_buffer() can fail; its result is now checked
 * before av_image_copy() dereferences it. */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *picref;

    if (!c->has_frame) {
        av_log(link->src, AV_LOG_ERROR,
               "request_frame() called with no available frame!\n");
        return -1;
    }

    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    if (!picref) /* FIX: was dereferenced unchecked */
        return -1;

    av_image_copy(picref->data, picref->linesize,
                  c->frame.data, c->frame.linesize,
                  picref->format, link->w, link->h);

    picref->pts                    = c->pts;
    picref->video->interlaced      = c->frame.interlaced_frame;
    picref->video->top_field_first = c->frame.top_field_first;

    avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(picref);
    c->has_frame = 0;

    return 0;
}
/* filter_samples() for the aconvert filter: optionally mix channels, then
 * optionally convert the sample format, and forward the resulting buffer
 * with the output link's channel layout and planarity applied.
 * Ownership of insamplesref is consumed by this function on every path.
 * FIX: the early return taken when init_buffers() fails used to leak
 * insamplesref (one buffer reference per failure); it is now released. */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
{
    AConvertContext *aconvert = inlink->dst->priv;
    AVFilterBufferRef *curbuf = insamplesref;
    AVFilterLink * const outlink = inlink->dst->outputs[0];
    int chan_mult;

    /* (re)init the internal buffers if this is the first buffer
     * provided or a bigger one is needed */
    if (!aconvert->max_nb_samples ||
        (curbuf->audio->nb_samples > aconvert->max_nb_samples))
        if (init_buffers(inlink, curbuf->audio->nb_samples) < 0) {
            av_log(inlink->dst, AV_LOG_ERROR, "Could not initialize buffers.\n");
            avfilter_unref_buffer(insamplesref); /* FIX: do not leak on error */
            return;
        }

    /* if channel mixing is required */
    if (aconvert->mix_samplesref) {
        memcpy(aconvert->in_mix, curbuf->data, sizeof(aconvert->in_mix));
        memcpy(aconvert->out_mix, aconvert->mix_samplesref->data,
               sizeof(aconvert->out_mix));
        aconvert->convert_chlayout(aconvert->out_mix, aconvert->in_mix,
                                   curbuf->audio->nb_samples, aconvert);
        curbuf = aconvert->mix_samplesref;
    }

    if (aconvert->audioconvert_ctx) {
        if (!aconvert->mix_samplesref) {
            if (aconvert->in_conv == aconvert->packed_data) {
                /* lay out per-channel pointers into the packed buffer */
                int i, packed_stride = av_get_bytes_per_sample(inlink->format);
                aconvert->packed_data[0] = curbuf->data[0];
                for (i = 1; i < aconvert->out_nb_channels; i++)
                    aconvert->packed_data[i] =
                        aconvert->packed_data[i-1] + packed_stride;
            } else {
                aconvert->in_conv = curbuf->data;
            }
        }

        /* packed->packed conversion handles all channels in a single pass */
        chan_mult = inlink->planar == outlink->planar && inlink->planar == 0 ?
            aconvert->out_nb_channels : 1;

        av_audio_convert(aconvert->audioconvert_ctx,
                         (void * const *) aconvert->out_conv,
                         aconvert->out_strides,
                         (const void * const *) aconvert->in_conv,
                         aconvert->in_strides,
                         curbuf->audio->nb_samples * chan_mult);

        curbuf = aconvert->out_samplesref;
    }

    avfilter_copy_buffer_ref_props(curbuf, insamplesref);
    curbuf->audio->channel_layout = outlink->channel_layout;
    curbuf->audio->planar         = outlink->planar;

    avfilter_filter_samples(inlink->dst->outputs[0],
                            avfilter_ref_buffer(curbuf, ~0));
    avfilter_unref_buffer(insamplesref);
}
/* buffersink end_frame(): cache the newest completed frame, releasing
 * whichever frame was cached before it.  Ownership of inlink->cur_buf
 * transfers to the sink. */
static void end_frame(AVFilterLink *inlink)
{
    BufferSinkContext *sink = inlink->dst->priv;
    AVFilterBufferRef *stale = sink->picref;

    sink->picref = inlink->cur_buf;
    if (stale) /* drop the last cached frame */
        avfilter_unref_buffer(stale);
}
/* Tear down the buffer sink: release any frame still cached and clear the
 * pointer. */
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSinkContext *sink = ctx->priv;
    AVFilterBufferRef *cached = sink->picref;

    sink->picref = NULL;
    if (cached)
        avfilter_unref_buffer(cached);
}
/* asplit filter_samples(): forward a read-only (write permission stripped)
 * reference of the input to both outputs, then drop the input reference. */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    int i;

    for (i = 0; i < 2; i++)
        avfilter_filter_samples(inlink->dst->outputs[i],
                                avfilter_ref_buffer(insamples, ~AV_PERM_WRITE));
    avfilter_unref_buffer(insamples);
}
FF_DISABLE_DEPRECATION_WARNINGS
/* AVBufferRef free() callback for the compatibility layer: the opaque
 * pointer carries the legacy AVFilterBufferRef backing the data, so
 * releasing the buffer simply drops that reference.  The `data` argument
 * is unused here.  The AV_NOWARN_DEPRECATED wrapper silences the warning
 * for calling the deprecated unref function. */
static void compat_free_buffer(void *opaque, uint8_t *data)
{
    AVFilterBufferRef *buf = opaque;
    AV_NOWARN_DEPRECATED(
    avfilter_unref_buffer(buf);
    )
}
/* Default end_frame() handler: release the input buffer, release the
 * output buffer if one was allocated (the default path shares/propagates
 * buffers, so out_buf is only a temporary), and propagate end_frame() to
 * the first output link if the filter has one (sinks have none). */
void avfilter_default_end_frame(AVFilterLink *inlink)
{
    AVFilterLink *outlink = NULL;

    if (inlink->dst->output_count)
        outlink = inlink->dst->outputs[0];

    /* the filter is done with the input; clear cur_buf so it cannot be
     * used after the unref */
    avfilter_unref_buffer(inlink->cur_buf);
    inlink->cur_buf = NULL;

    if (outlink) {
        if (outlink->out_buf) {
            avfilter_unref_buffer(outlink->out_buf);
            outlink->out_buf = NULL;
        }
        avfilter_end_frame(outlink);
    }
}
/* end_frame() for the OpenCV smooth filter: wrap the finished input and
 * the output buffer as IplImages, run cvSmooth() with the configured
 * parameters, and forward the smoothed frame downstream. */
static void smooth_end_frame(AVFilterLink *inlink)
{
    SmoothContext *smooth = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *src = inlink->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;
    IplImage src_img, dst_img;

    fill_iplimage_from_picref(&src_img, src, inlink->format);
    fill_iplimage_from_picref(&dst_img, dst, inlink->format);
    cvSmooth(&src_img, &dst_img, smooth->type,
             smooth->param1, smooth->param2, smooth->param3, smooth->param4);
    fill_picref_from_iplimage(dst, &dst_img, inlink->format);

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/* tile filter end_frame(): release the finished input frame and, once
 * w*h inputs have been counted, flush the assembled tile frame. */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TileContext *tile = ctx->priv;

    avfilter_unref_buffer(inlink->cur_buf);
    tile->current++;
    if (tile->current == tile->w * tile->h)
        end_last_frame(ctx);
}
/* Tear down the buffer source: release the cached picture reference and
 * free the internally owned scale filter instance, clearing both fields. */
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *src = ctx->priv;

    if (src->picref)
        avfilter_unref_buffer(src->picref);
    src->picref = NULL;

    avfilter_free(src->scale);
    src->scale = NULL;
}
/* Tear down the thumbnail filter: release every cached candidate frame
 * (the array is terminated by the first NULL buf) and free the array. */
static av_cold void uninit(AVFilterContext *ctx)
{
    ThumbContext *thumb = ctx->priv;
    int i = 0;

    while (i < thumb->n_frames && thumb->frames[i].buf) {
        avfilter_unref_buffer(thumb->frames[i].buf);
        thumb->frames[i].buf = NULL;
        i++;
    }
    av_freep(&thumb->frames);
}
/* split filter end_frame(): signal end-of-frame on every output link, then
 * drop the input reference. */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    int out;

    for (out = 0; out < ctx->output_count; out++)
        avfilter_end_frame(ctx->outputs[out]);
    avfilter_unref_buffer(inlink->cur_buf);
}
/* Generic libopencv end_frame(): wrap the input/output picrefs as
 * IplImages, invoke the configured per-filter end_frame_filter() hook, and
 * forward the processed frame downstream. */
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    OCVContext *ocv = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterBufferRef *src = inlink->cur_buf;
    AVFilterBufferRef *dst = outlink->out_buf;
    IplImage src_img, dst_img;

    fill_iplimage_from_picref(&src_img, src, inlink->format);
    fill_iplimage_from_picref(&dst_img, dst, inlink->format);
    ocv->end_frame_filter(ctx, &src_img, &dst_img);
    fill_picref_from_iplimage(dst, &dst_img, inlink->format);

    avfilter_unref_buffer(src);
    avfilter_draw_slice(outlink, 0, outlink->h, 1);
    avfilter_end_frame(outlink);
    avfilter_unref_buffer(dst);
}
/* FIXME: samplesref is same as link->cur_buf. Need to consider removing
   the redundant parameter. */
/* Default filter_samples() handler: allocate a writable output buffer,
 * copy over pts and sample_rate, hand a reference downstream, then release
 * both buffers.
 * NOTE(review): the sample data itself is never copied into out_buf --
 * presumably downstream default handlers operate on cur_buf; confirm.
 * FIX: ff_default_get_audio_buffer() can return NULL on allocation
 * failure; this used to be dereferenced unchecked. */
void ff_default_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterLink *outlink = NULL;

    if (inlink->dst->output_count)
        outlink = inlink->dst->outputs[0];

    if (outlink) {
        outlink->out_buf =
            ff_default_get_audio_buffer(inlink, AV_PERM_WRITE,
                                        samplesref->audio->nb_samples);
        if (!outlink->out_buf) {
            /* FIX: drop the input and bail out instead of crashing */
            avfilter_unref_buffer(samplesref);
            inlink->cur_buf = NULL;
            return;
        }
        outlink->out_buf->pts                = samplesref->pts;
        outlink->out_buf->audio->sample_rate = samplesref->audio->sample_rate;
        ff_filter_samples(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
        avfilter_unref_buffer(outlink->out_buf);
        outlink->out_buf = NULL;
    }
    avfilter_unref_buffer(samplesref);
    inlink->cur_buf = NULL;
}