/**
 * Hand the oldest queued picture to the next filter.
 *
 * If the queue is empty, first request a frame from the input so that
 * fifo->root.next becomes non-NULL. Ownership of the picture reference
 * passes to the next filter via ff_start_frame(), so it is not
 * unreferenced here; only the queue node itself is freed.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    BufPic *tmp;
    int ret;

    if (!fifo->root.next) {
        /* Fixed misplaced parenthesis: the original
         *     if ((ret = ff_request_frame(...) < 0))
         * assigned the *comparison result* (0/1) to ret, so errors were
         * propagated as +1 (looks like success to callers) and execution
         * could fall through with an empty queue, dereferencing NULL
         * below. The closing parenthesis belongs right after the call. */
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about dereferencing it ourselves. */
    ff_start_frame(outlink, fifo->root.next->picref);
    ff_draw_slice (outlink, 0, outlink->h, 1);
    ff_end_frame  (outlink);

    /* unlink and free the consumed queue node */
    if (fifo->last == fifo->root.next)
        fifo->last = &fifo->root;
    tmp = fifo->root.next->next;
    av_free(fifo->root.next);
    fifo->root.next = tmp;

    return 0;
}
/**
 * Copy the alpha channel of the input slice into the (grayscale) output.
 *
 * Three cases:
 *  - packed RGBA input: walk each row and pick every 4th byte, starting
 *    at the alpha offset given by extract->rgba_map[A];
 *  - planar input whose alpha-plane linesize matches the output luma
 *    linesize: one bulk memcpy for the whole slice;
 *  - planar input with differing linesizes: copy row by row, bounded by
 *    the smaller of the two linesizes.
 */
static int draw_slice(AVFilterLink *inlink, int y0, int h, int slice_dir)
{
    AlphaExtractContext *extract = inlink->dst->priv;
    AVFilterBufferRef *cur_buf = inlink->cur_buf;
    AVFilterBufferRef *out_buf = inlink->dst->outputs[0]->out_buf;

    if (extract->is_packed_rgb) {
        int x, y;
        uint8_t *pin, *pout;
        for (y = y0; y < (y0 + h); y++) {
            /* pin starts at the alpha byte of the first pixel of row y */
            pin  = cur_buf->data[0] + y * cur_buf->linesize[0] + extract->rgba_map[A];
            pout = out_buf->data[0] + y * out_buf->linesize[0];
            for (x = 0; x < out_buf->video->w; x++) {
                *pout = *pin;
                pout += 1;
                pin  += 4; /* 4 bytes per packed RGBA pixel */
            }
        }
    } else if (cur_buf->linesize[A] == out_buf->linesize[Y]) {
        /* identical strides: the slice is contiguous in both buffers,
         * so a single memcpy of h full rows is valid */
        const int linesize = cur_buf->linesize[A];
        memcpy(out_buf->data[Y] + y0 * linesize,
               cur_buf->data[A] + y0 * linesize,
               linesize * h);
    } else {
        /* differing strides: per-row copy, clamped to the narrower
         * linesize so neither buffer is over-read or over-written */
        const int linesize = FFMIN(out_buf->linesize[Y], cur_buf->linesize[A]);
        int y;
        for (y = y0; y < (y0 + h); y++) {
            memcpy(out_buf->data[Y] + y * out_buf->linesize[Y],
                   cur_buf->data[A] + y * cur_buf->linesize[A],
                   linesize);
        }
    }
    return ff_draw_slice(inlink->dst->outputs[0], y0, h, slice_dir);
}
/**
 * Apply the logo-removal blur to a completed frame and push it downstream.
 *
 * The luma plane is processed with the full-resolution mask/bbox, and the
 * two chroma planes with the half-resolution mask/bbox at half dimensions
 * (assumes 4:2:0-style subsampling — the /2 on w and h).
 */
static int end_frame(AVFilterLink *inlink)
{
    RemovelogoContext *removelogo = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpicref  = inlink ->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    /* when the input and output refs alias, blur_image works in place */
    int direct = inpicref == outpicref;

    /* luma plane; the width appears twice — presumably (w, stride-or-w)
     * per blur_image()'s parameter list — TODO confirm against its
     * declaration */
    blur_image(removelogo->mask,
               inpicref ->data[0], inpicref ->linesize[0],
               outpicref->data[0], outpicref->linesize[0],
               removelogo->full_mask_data, inlink->w,
               inlink->w, inlink->h, direct, &removelogo->full_mask_bbox);
    /* chroma planes at half resolution with the half-size mask */
    blur_image(removelogo->mask,
               inpicref ->data[1], inpicref ->linesize[1],
               outpicref->data[1], outpicref->linesize[1],
               removelogo->half_mask_data, inlink->w/2,
               inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);
    blur_image(removelogo->mask,
               inpicref ->data[2], inpicref ->linesize[2],
               outpicref->data[2], outpicref->linesize[2],
               removelogo->half_mask_data, inlink->w/2,
               inlink->w/2, inlink->h/2, direct, &removelogo->half_mask_bbox);

    ff_draw_slice(outlink, 0, inlink->h, 1);
    return ff_end_frame(outlink);
}
/**
 * Satisfy a downstream frame request for the select filter.
 *
 * A previously cached frame, if any, is popped from the pending FIFO and
 * emitted immediately. Otherwise frames are pulled from the input until
 * one passes the selection expression (which sets select->select).
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx    = outlink->src;
    SelectContext   *select = ctx->priv;
    AVFilterLink    *inlink = outlink->src->inputs[0];

    select->select = 0;

    if (av_fifo_size(select->pending_frames) > 0) {
        AVFilterBufferRef *frame;

        av_fifo_generic_read(select->pending_frames, &frame, sizeof(frame), NULL);
        /* emit a fresh reference; the FIFO's reference is dropped below */
        ff_start_frame(outlink, avfilter_ref_buffer(frame, ~0));
        ff_draw_slice(outlink, 0, outlink->h, 1);
        ff_end_frame(outlink);
        avfilter_unref_buffer(frame);
        return 0;
    }

    /* keep requesting input frames until one is selected */
    while (!select->select) {
        int ret = ff_request_frame(inlink);
        if (ret < 0)
            return ret;
    }
    return 0;
}
/**
 * Box-blur a completed frame: horizontal pass from input to output,
 * then vertical pass in place on the output, per plane.
 *
 * @return the propagated result of the default end_frame handler
 */
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BoxBlurContext *boxblur = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpicref  = inlink ->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    int plane;
    int cw = inlink->w >> boxblur->hsub, ch = inlink->h >> boxblur->vsub;
    /* per-plane dimensions: luma, two subsampled chroma planes, alpha */
    int w[4] = { inlink->w, cw, cw, inlink->w };
    int h[4] = { inlink->h, ch, ch, inlink->h };

    /* Fixed condition order: the bound check must come before the array
     * access, otherwise data[4] is read when all four planes are present. */
    for (plane = 0; plane < 4 && inpicref->data[plane]; plane++)
        hblur(outpicref->data[plane], outpicref->linesize[plane],
              inpicref ->data[plane], inpicref ->linesize[plane],
              w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
              boxblur->temp);

    /* vertical pass runs in place on the output of the horizontal pass */
    for (plane = 0; plane < 4 && inpicref->data[plane]; plane++)
        vblur(outpicref->data[plane], outpicref->linesize[plane],
              outpicref->data[plane], outpicref->linesize[plane],
              w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
              boxblur->temp);

    ff_draw_slice(outlink, 0, inlink->h, 1);
    return avfilter_default_end_frame(inlink);
}
/**
 * Box-blur one incoming slice (horizontal pass input→output, vertical
 * pass in place on the output) and forward it downstream.
 *
 * NOTE(review): the plane data pointers are not offset by y0, so for
 * slices other than y0 == 0 the blur appears to start at the top of the
 * frame — confirm against how this filter configures slicing upstream.
 */
static int draw_slice(AVFilterLink *inlink, int y0, int h0, int slice_dir)
{
    AVFilterContext *ctx = inlink->dst;
    BoxBlurContext *boxblur = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpicref  = inlink ->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    int plane;
    int cw = inlink->w >> boxblur->hsub, ch = h0 >> boxblur->vsub;
    /* per-plane dimensions: luma, two subsampled chroma planes, alpha */
    int w[4] = { inlink->w, cw, cw, inlink->w };
    int h[4] = { h0, ch, ch, h0 };

    /* Fixed condition order: the bound check must come before the array
     * access, otherwise data[4] is read when all four planes are present. */
    for (plane = 0; plane < 4 && inpicref->data[plane]; plane++)
        hblur(outpicref->data[plane], outpicref->linesize[plane],
              inpicref ->data[plane], inpicref ->linesize[plane],
              w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
              boxblur->temp);

    for (plane = 0; plane < 4 && inpicref->data[plane]; plane++)
        vblur(outpicref->data[plane], outpicref->linesize[plane],
              outpicref->data[plane], outpicref->linesize[plane],
              w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
              boxblur->temp);

    return ff_draw_slice(outlink, y0, h0, slice_dir);
}
/**
 * Merge the alpha buffer into the main picture.
 *
 * For packed RGBA the alpha byte of every pixel (at offset rgba_map[A])
 * is overwritten from the grayscale alpha buffer; for planar formats the
 * alpha plane is filled row by row from the alpha buffer's luma plane.
 */
static void draw_frame(AVFilterContext *ctx,
                       AVFilterBufferRef *main_buf,
                       AVFilterBufferRef *alpha_buf)
{
    AlphaMergeContext *merge = ctx->priv;
    int h = main_buf->video->h;

    if (merge->is_packed_rgb) {
        int x, y;
        for (y = 0; y < h; y++) {
            uint8_t *src = alpha_buf->data[0] + y * alpha_buf->linesize[0];
            uint8_t *dst = main_buf->data[0] + y * main_buf->linesize[0]
                         + merge->rgba_map[A];
            for (x = 0; x < main_buf->video->w; x++) {
                *dst = *src;
                src += 1;
                dst += 4; /* one alpha byte per 4-byte packed pixel */
            }
        }
    } else {
        const int main_linesize  = main_buf->linesize[A];
        const int alpha_linesize = alpha_buf->linesize[Y];
        const int copy_size      = FFMIN(main_linesize, alpha_linesize);
        int y;

        /* bound by both heights in case the alpha buffer is shorter */
        for (y = 0; y < h && y < alpha_buf->video->h; y++)
            memcpy(main_buf->data[A] + y * main_linesize,
                   alpha_buf->data[Y] + y * alpha_linesize,
                   copy_size);
    }
    ff_draw_slice(ctx->outputs[0], 0, h, 1);
}
/**
 * Forward a slice only when the current frame has been selected and
 * frames are not being cached for later emission.
 */
static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
    SelectContext *select = inlink->dst->priv;

    if (!select->select || select->cache_frames)
        return;
    ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir);
}
/**
 * Round-trip every component of the slice through the generic pixel
 * descriptor read/write helpers (exercises av_read_image_line /
 * av_write_image_line for the current pixel format).
 */
static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
    PixdescTestContext *priv = inlink->dst->priv;
    AVFilterBufferRef *in  = inlink->cur_buf;
    AVFilterBufferRef *out = inlink->dst->outputs[0]->out_buf;
    const int w = inlink->w;
    int comp, line;

    for (comp = 0; comp < priv->pix_desc->nb_components; comp++) {
        /* components 1 and 2 are chroma and may be subsampled */
        const int is_chroma = comp == 1 || comp == 2;
        const int cw = is_chroma ? w >> priv->pix_desc->log2_chroma_w : w;
        const int ch = is_chroma ? h >> priv->pix_desc->log2_chroma_h : h;
        const int cy = is_chroma ? y >> priv->pix_desc->log2_chroma_h : y;

        for (line = cy; line < cy + ch; line++) {
            av_read_image_line(priv->line,
                               in->data, in->linesize,
                               priv->pix_desc, 0, line, comp, cw, 0);
            av_write_image_line(priv->line,
                                out->data, out->linesize,
                                priv->pix_desc, 0, line, comp, cw);
        }
    }
    return ff_draw_slice(inlink->dst->outputs[0], y, h, slice_dir);
}
/**
 * Fan the incoming slice out to every output of the filter.
 */
static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
    AVFilterContext *ctx = inlink->dst;
    int out;

    for (out = 0; out < ctx->nb_outputs; out++)
        ff_draw_slice(ctx->outputs[out], y, h, slice_dir);
}
/**
 * Re-slice an incoming slice into chunks of at most slice->h lines,
 * preserving the slice direction.
 *
 * For top-to-bottom (slice_dir == 1) full chunks are emitted from y
 * downward, followed by any remainder. For bottom-to-top
 * (slice_dir == -1) full chunks are emitted from y+h upward, with the
 * remainder emitted last at the top of the range.
 */
static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
    SliceContext *slice = link->dst->priv;
    int y2;

    if (slice_dir == 1) {
        /* emit full slice->h chunks while they fit inside [y, y+h) */
        for (y2 = y; y2 + slice->h <= y + h; y2 += slice->h)
            ff_draw_slice(link->dst->outputs[0], y2, slice->h, slice_dir);
        /* partial remainder, if the height was not a multiple of slice->h */
        if (y2 < y + h)
            ff_draw_slice(link->dst->outputs[0], y2, y + h - y2, slice_dir);
    } else if (slice_dir == -1) {
        /* walk downward from the bottom edge, emitting full chunks */
        for (y2 = y + h; y2 - slice->h >= y; y2 -= slice->h)
            ff_draw_slice(link->dst->outputs[0], y2 - slice->h, slice->h, slice_dir);
        /* partial remainder at the top of the range */
        if (y2 > y)
            ff_draw_slice(link->dst->outputs[0], y, y2 - y, slice_dir);
    }
}
/**
 * Flush the current tile grid: pad any unfilled cells with blank frames,
 * then emit the assembled output frame and reset the cell counter.
 */
static void end_last_frame(AVFilterContext *ctx)
{
    TileContext  *tile = ctx->priv;
    AVFilterLink *out  = ctx->outputs[0];

    /* fill the remaining grid cells so the output frame is complete */
    while (tile->current < tile->w * tile->h)
        draw_blank_frame(ctx);

    ff_draw_slice(out, 0, out->out_buf->video->h, 1);
    ff_end_frame(out);
    tile->current = 0;
}
/**
 * Fan the incoming slice out to every output, stopping at the first
 * error and propagating it; returns 0 when all outputs succeed.
 */
static int draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
    AVFilterContext *ctx = inlink->dst;
    int i;

    for (i = 0; i < ctx->nb_outputs; i++) {
        int ret = ff_draw_slice(ctx->outputs[i], y, h, slice_dir);
        if (ret < 0)
            return ret;
    }
    return 0;
}
/**
 * Buffer incoming frames and, once N frames are collected, emit the one
 * whose histogram is closest (sum of squared errors) to the average
 * histogram of the batch. All other buffered frames are released.
 */
static int end_frame(AVFilterLink *inlink)
{
    int i, j, best_frame_idx = 0;
    double avg_hist[HIST_SIZE] = {0}, sq_err, min_sq_err = -1;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    ThumbContext *thumb   = inlink->dst->priv;
    AVFilterContext *ctx  = inlink->dst;
    AVFilterBufferRef *picref;

    // keep a reference of each frame (ownership moves from the link to us)
    thumb->frames[thumb->n].buf = inlink->cur_buf;
    inlink->cur_buf = NULL;

    // no selection until the buffer of N frames is filled up
    if (thumb->n < thumb->n_frames - 1) {
        thumb->n++;
        return 0;
    }

    // average histogram of the N frames
    for (j = 0; j < FF_ARRAY_ELEMS(avg_hist); j++) {
        for (i = 0; i < thumb->n_frames; i++)
            avg_hist[j] += (double)thumb->frames[i].histogram[j];
        avg_hist[j] /= thumb->n_frames;
    }

    // find the frame closer to the average using the sum of squared errors
    for (i = 0; i < thumb->n_frames; i++) {
        sq_err = frame_sum_square_err(thumb->frames[i].histogram, avg_hist);
        // min_sq_err starts at -1, so i == 0 always initializes the minimum
        if (i == 0 || sq_err < min_sq_err)
            best_frame_idx = i, min_sq_err = sq_err;
    }

    // free and reset everything (except the best frame buffer)
    for (i = 0; i < thumb->n_frames; i++) {
        memset(thumb->frames[i].histogram, 0,
               sizeof(thumb->frames[i].histogram));
        if (i == best_frame_idx)
            continue;
        avfilter_unref_buffer(thumb->frames[i].buf);
        thumb->frames[i].buf = NULL;
    }
    thumb->n = 0;

    // raise the chosen one: its reference is handed to the next filter
    // via ff_start_frame(), so the slot is cleared instead of unreffed
    picref = thumb->frames[best_frame_idx].buf;
    av_log(ctx, AV_LOG_INFO, "frame id #%d (pts_time=%f) selected\n",
           best_frame_idx, picref->pts * av_q2d(inlink->time_base));
    ff_start_frame(outlink, picref);
    thumb->frames[best_frame_idx].buf = NULL;
    ff_draw_slice(outlink, 0, inlink->h, 1);
    return ff_end_frame(outlink);
}
/**
 * Run the frei0r plugin on a completed frame and push the result
 * downstream. The input reference is released before emitting; the
 * output reference is released after ff_end_frame().
 */
static void end_frame(AVFilterLink *inlink)
{
    Frei0rContext *frei0r  = inlink->dst->priv;
    AVFilterLink  *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *in  = inlink->cur_buf;
    AVFilterBufferRef *out = outlink->out_buf;
    /* frei0r expects the frame time in milliseconds */
    double time_ms = in->pts * av_q2d(inlink->time_base) * 1000;

    frei0r->update(frei0r->instance, time_ms,
                   (const uint32_t *)in->data[0],
                   (uint32_t *)out->data[0]);
    avfilter_unref_buffer(in);
    ff_draw_slice(outlink, 0, outlink->h, 1);
    ff_end_frame(outlink);
    avfilter_unref_buffer(out);
}
/**
 * Count "black" pixels in the slice: every luma sample below bthresh
 * increments the running nblack counter, then the slice is forwarded.
 */
static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
    AVFilterContext *ctx = inlink->dst;
    BlackFrameContext *blackframe = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    const uint8_t *row = picref->data[0] + y * picref->linesize[0];
    int x, line;

    for (line = 0; line < h; line++) {
        for (x = 0; x < inlink->w; x++)
            if (row[x] < blackframe->bthresh)
                blackframe->nblack++;
        row += picref->linesize[0];
    }
    ff_draw_slice(ctx->outputs[0], y, h, slice_dir);
}
/**
 * Run the frei0r plugin on a completed frame and push the result
 * downstream, propagating any error from the emit helpers.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int end_frame(AVFilterLink *inlink)
{
    Frei0rContext *frei0r  = inlink->dst->priv;
    AVFilterLink  *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpicref  = inlink->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;
    int ret;

    /* frei0r expects the frame time in milliseconds */
    frei0r->update(frei0r->instance,
                   inpicref->pts * av_q2d(inlink->time_base) * 1000,
                   (const uint32_t *)inpicref->data[0],
                   (uint32_t *)outpicref->data[0]);

    /* Fixed missing "< 0" on the first clause: the original treated ANY
     * non-zero return of ff_draw_slice() — including positive values —
     * as failure, unlike every sibling filter which only fails on
     * negative error codes. */
    if ((ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0 ||
        (ret = ff_end_frame(outlink)) < 0)
        return ret;
    return 0;
}
/**
 * Apply the colorspace-matrix conversion appropriate for the input
 * pixel format and emit the converted frame downstream.
 */
static int end_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    ColorMatrixContext *color = ctx->priv;
    AVFilterBufferRef *out = color->outpicref;

    switch (link->cur_buf->format) {
    case AV_PIX_FMT_YUV422P:
        process_frame_yuv422p(color, out, link->cur_buf);
        break;
    case AV_PIX_FMT_YUV420P:
        process_frame_yuv420p(color, out, link->cur_buf);
        break;
    default:
        /* remaining negotiated format: packed UYVY422 */
        process_frame_uyvy422(color, out, link->cur_buf);
        break;
    }

    ff_draw_slice(ctx->outputs[0], 0, link->dst->outputs[0]->h, 1);
    return ff_end_frame(ctx->outputs[0]);
}
/**
 * Forward a slice only when this filter will leave the picture
 * untouched. When the frame is interlaced with the wrong field order,
 * end_frame() shifts lines, so the output slice would not correspond to
 * the input slice; in that case the slice is swallowed here.
 */
static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
{
    AVFilterContext   *ctx        = inlink->dst;
    FieldOrderContext *fieldorder = ctx->priv;
    AVFilterLink      *outlink    = ctx->outputs[0];
    AVFilterBufferRef *inpicref   = inlink->cur_buf;

    /* picture content will be moved — defer output to end_frame() */
    if (inpicref->video->interlaced &&
        inpicref->video->top_field_first != fieldorder->dst_tff)
        return;

    ff_draw_slice(outlink, y, h, slice_dir);
}
/**
 * Produce one frame of solid color on request.
 *
 * Allocates a video buffer, stamps pts/pos/SAR, emits a fresh reference
 * downstream, fills the rectangle, and releases the local reference.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int color_request_frame(AVFilterLink *link)
{
    ColorContext *color = link->src->priv;
    AVFilterBufferRef *picref =
        avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
    AVFilterBufferRef *buf_out;

    /* Added missing NULL checks: the original dereferenced the buffer
     * unconditionally, crashing on allocation failure (the newer
     * variant of this function in this file checks both results). */
    if (!picref)
        return AVERROR(ENOMEM);

    picref->video->sample_aspect_ratio = (AVRational) {1, 1};
    picref->pts = color->pts++;
    picref->pos = -1;

    buf_out = avfilter_ref_buffer(picref, ~0);
    if (!buf_out) {
        avfilter_unref_buffer(picref);
        return AVERROR(ENOMEM);
    }

    /* ownership of buf_out passes to the next filter */
    ff_start_frame(link, buf_out);
    ff_fill_rectangle(&color->draw, &color->color,
                      picref->data, picref->linesize,
                      0, 0, color->w, color->h);
    ff_draw_slice(link, 0, color->h, 1);
    ff_end_frame(link);
    avfilter_unref_buffer(picref);
    return 0;
}
/**
 * Produce one frame from a frei0r source plugin on request.
 *
 * Allocates a video buffer, stamps pts/pos/PAR, emits a fresh reference
 * downstream, lets the plugin render into the buffer, and releases the
 * local reference.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *picref =
        ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    AVFilterBufferRef *buf_out;

    /* Added missing NULL checks: the original dereferenced the buffer
     * unconditionally, crashing on allocation failure (the newer
     * variant of this function in this file checks both results). */
    if (!picref)
        return AVERROR(ENOMEM);

    picref->video->pixel_aspect = (AVRational) {1, 1};
    picref->pts = frei0r->pts++;
    picref->pos = -1;

    buf_out = avfilter_ref_buffer(picref, ~0);
    if (!buf_out) {
        avfilter_unref_buffer(picref);
        return AVERROR(ENOMEM);
    }

    /* ownership of buf_out passes to the next filter */
    ff_start_frame(outlink, buf_out);
    /* frei0r expects the time in milliseconds */
    frei0r->update(frei0r->instance,
                   av_rescale_q(picref->pts, frei0r->time_base,
                                (AVRational){1,1000}),
                   NULL, (uint32_t *)picref->data[0]);
    ff_draw_slice(outlink, 0, outlink->h, 1);
    ff_end_frame(outlink);
    avfilter_unref_buffer(picref);
    return 0;
}
/**
 * Send a frame of data to the next filter.
 *
 * Video frames are expanded into the legacy start_frame / draw_slice /
 * end_frame sequence; audio frames go through ff_filter_samples().
 *
 * @return >= 0 on success, a negative AVERROR on error
 */
int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
{
    int ret;

    FF_TPRINTF_START(NULL, filter_frame);
    ff_tlog_link(NULL, link, 1);
    ff_tlog(NULL, " ");
    ff_tlog_ref(NULL, frame, 1);

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = ff_start_frame(link, frame);
        if (ret < 0)
            return ret;
        ret = ff_draw_slice(link, 0, frame->video->h, 1);
        if (ret < 0)
            return ret;
        return ff_end_frame(link);
    case AVMEDIA_TYPE_AUDIO:
        return ff_filter_samples(link, frame);
    default:
        return AVERROR(EINVAL);
    }
}
/**
 * Wrap the input and output pictures as IplImages, run the configured
 * OpenCV filter, copy the result back, and emit the frame downstream.
 */
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx     = inlink->dst;
    OCVContext      *ocv     = ctx->priv;
    AVFilterLink    *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *in  = inlink ->cur_buf;
    AVFilterBufferRef *out = outlink->out_buf;
    IplImage src_img, dst_img;
    int ret;

    /* build OpenCV views over the AVFilter buffers */
    fill_iplimage_from_picref(&src_img, in,  inlink->format);
    fill_iplimage_from_picref(&dst_img, out, inlink->format);
    ocv->end_frame_filter(ctx, &src_img, &dst_img);
    fill_picref_from_iplimage(out, &dst_img, inlink->format);

    ret = ff_draw_slice(outlink, 0, outlink->h, 1);
    if (ret < 0)
        return ret;
    ret = ff_end_frame(outlink);
    if (ret < 0)
        return ret;
    return 0;
}
/**
 * Denoise all three planes of the completed frame (chroma planes at
 * subsampled dimensions) and emit the result downstream.
 */
static int end_frame(AVFilterLink *inlink)
{
    HQDN3DContext *hqdn3d  = inlink->dst->priv;
    AVFilterLink  *outlink = inlink->dst->outputs[0];
    AVFilterBufferRef *inpic  = inlink ->cur_buf;
    AVFilterBufferRef *outpic = outlink->out_buf;
    int ret, plane;

    for (plane = 0; plane < 3; plane++) {
        /* planes 1 and 2 are chroma: shift dimensions by hsub/vsub */
        int sub_w = inpic->video->w >> (!!plane * hqdn3d->hsub);
        int sub_h = inpic->video->h >> (!!plane * hqdn3d->vsub);

        denoise(inpic->data[plane], outpic->data[plane],
                hqdn3d->line, &hqdn3d->frame_prev[plane],
                sub_w, sub_h,
                inpic->linesize[plane], outpic->linesize[plane],
                hqdn3d->coefs[plane ? 2 : 0],
                hqdn3d->coefs[plane ? 3 : 1]);
    }

    ret = ff_draw_slice(outlink, 0, inpic->video->h, 1);
    if (ret < 0)
        return ret;
    ret = ff_end_frame(outlink);
    if (ret < 0)
        return ret;
    return 0;
}
/**
 * Produce one frame of solid color on request.
 *
 * Allocates a video buffer, stamps pts/pos/PAR, pushes a fresh
 * reference downstream, draws the color rectangle, and always releases
 * the local reference on exit via the shared cleanup path.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int color_request_frame(AVFilterLink *link)
{
    ColorContext *color = link->src->priv;
    AVFilterBufferRef *frame =
        ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
    AVFilterBufferRef *ref;
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);
    frame->video->pixel_aspect = (AVRational) {1, 1};
    frame->pts                 = color->pts++;
    frame->pos                 = -1;

    ref = avfilter_ref_buffer(frame, ~0);
    if (!ref) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* ownership of ref passes to the next filter here */
    if ((ret = ff_start_frame(link, ref)) < 0)
        goto fail;

    ff_draw_rectangle(frame->data, frame->linesize,
                      color->line, color->line_step,
                      color->hsub, color->vsub,
                      0, 0, color->w, color->h);
    if ((ret = ff_draw_slice(link, 0, color->h, 1)) < 0)
        goto fail;
    ret = ff_end_frame(link);

fail:
    avfilter_unref_buffer(frame);
    return ret;
}
/**
 * Produce one frame from a frei0r source plugin on request.
 *
 * Allocates a video buffer, stamps pts/pos/PAR, pushes a fresh
 * reference downstream, lets the plugin render into the buffer, and
 * always releases the local reference on exit via the shared cleanup
 * path.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int source_request_frame(AVFilterLink *outlink)
{
    Frei0rContext *frei0r = outlink->src->priv;
    AVFilterBufferRef *frame =
        ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
    AVFilterBufferRef *ref;
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);
    frame->video->pixel_aspect = (AVRational) {1, 1};
    frame->pts                 = frei0r->pts++;
    frame->pos                 = -1;

    ref = avfilter_ref_buffer(frame, ~0);
    if (!ref) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* ownership of ref passes to the next filter here */
    if ((ret = ff_start_frame(outlink, ref)) < 0)
        goto fail;

    /* frei0r expects the time in milliseconds */
    frei0r->update(frei0r->instance,
                   av_rescale_q(frame->pts, frei0r->time_base,
                                (AVRational){1,1000}),
                   NULL, (uint32_t *)frame->data[0]);
    if ((ret = ff_draw_slice(outlink, 0, outlink->h, 1)) < 0)
        goto fail;
    ret = ff_end_frame(outlink);

fail:
    avfilter_unref_buffer(frame);
    return ret;
}
/**
 * Hand the oldest queued buffer to the next filter, requesting a frame
 * from the input first when the queue is empty.
 *
 * Ownership of the queued reference moves to the next filter, so the
 * node is only popped from the queue, never unreferenced here.
 *
 * @return >= 0 on success, a negative AVERROR code on failure
 */
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    int ret = 0;

    if (!fifo->root.next) {
        ret = ff_request_frame(outlink->src->inputs[0]);
        if (ret < 0)
            return ret;
        av_assert0(fifo->root.next);
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about dereferencing it ourselves. */
    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = ff_start_frame(outlink, fifo->root.next->buf);
        if (ret >= 0)
            ret = ff_draw_slice(outlink, 0, outlink->h, 1);
        if (ret >= 0)
            ret = ff_end_frame(outlink);
        if (ret < 0)
            return ret;
        queue_pop(fifo);
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (outlink->request_samples)
            return return_audio_frame(outlink->src);
        ret = ff_filter_samples(outlink, fifo->root.next->buf);
        queue_pop(fifo);
        break;
    default:
        return AVERROR(EINVAL);
    }

    return ret;
}
/**
 * Deshake a completed frame.
 *
 * Estimates the global motion of the current frame relative to the
 * previous one (optionally restricted to a user-supplied region),
 * separates intentional camera motion (exponential moving average) from
 * jitter, inverts the jitter, accumulates it into an absolute
 * correction, and applies the resulting transform to the luma and
 * chroma planes before emitting the frame. The current input becomes
 * the reference for the next frame.
 */
static int end_frame(AVFilterLink *link)
{
    DeshakeContext *deshake = link->dst->priv;
    AVFilterBufferRef *in  = link->cur_buf;
    AVFilterBufferRef *out = link->dst->outputs[0]->out_buf;
    Transform t = {{0},0}, orig = {{0},0};
    float matrix[9];
    /* smoothing factor of the one-sided exponential average below */
    float alpha = 2.0 / deshake->refcount;
    char tmp[256];

    /* take ownership of the input buffer; it is kept as deshake->ref */
    link->cur_buf = NULL; /* it is in 'in' now */

    if (deshake->cx < 0 || deshake->cy < 0 || deshake->cw < 0 || deshake->ch < 0) {
        // Find the most likely global motion for the current frame
        // (first frame compares against itself: deshake->ref is NULL)
        find_motion(deshake, (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0],
                    in->data[0], link->w, link->h, in->linesize[0], &t);
    } else {
        /* user-supplied search region: clamp it to the frame */
        uint8_t *src1 = (deshake->ref == NULL) ? in->data[0] : deshake->ref->data[0];
        uint8_t *src2 = in->data[0];

        deshake->cx = FFMIN(deshake->cx, link->w);
        deshake->cy = FFMIN(deshake->cy, link->h);
        if ((unsigned)deshake->cx + (unsigned)deshake->cw > link->w)
            deshake->cw = link->w - deshake->cx;
        if ((unsigned)deshake->cy + (unsigned)deshake->ch > link->h)
            deshake->ch = link->h - deshake->cy;

        // Quadword align right margin
        deshake->cw &= ~15;

        src1 += deshake->cy * in->linesize[0] + deshake->cx;
        src2 += deshake->cy * in->linesize[0] + deshake->cx;

        find_motion(deshake, src1, src2, deshake->cw, deshake->ch, in->linesize[0], &t);
    }

    // Copy transform so we can output it later to compare to the smoothed value
    orig.vector.x = t.vector.x;
    orig.vector.y = t.vector.y;
    orig.angle    = t.angle;
    orig.zoom     = t.zoom;

    // Generate a one-sided moving exponential average
    deshake->avg.vector.x = alpha * t.vector.x + (1.0 - alpha) * deshake->avg.vector.x;
    deshake->avg.vector.y = alpha * t.vector.y + (1.0 - alpha) * deshake->avg.vector.y;
    deshake->avg.angle    = alpha * t.angle    + (1.0 - alpha) * deshake->avg.angle;
    deshake->avg.zoom     = alpha * t.zoom     + (1.0 - alpha) * deshake->avg.zoom;

    // Remove the average from the current motion to detect the motion that
    // is not on purpose, just as jitter from bumping the camera
    t.vector.x -= deshake->avg.vector.x;
    t.vector.y -= deshake->avg.vector.y;
    t.angle    -= deshake->avg.angle;
    t.zoom     -= deshake->avg.zoom;

    // Invert the motion to undo it
    t.vector.x *= -1;
    t.vector.y *= -1;
    t.angle    *= -1;

    // Write statistics to file
    if (deshake->fp) {
        snprintf(tmp, 256, "%f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f, %f\n",
                 orig.vector.x, deshake->avg.vector.x, t.vector.x,
                 orig.vector.y, deshake->avg.vector.y, t.vector.y,
                 orig.angle,    deshake->avg.angle,    t.angle,
                 orig.zoom,     deshake->avg.zoom,     t.zoom);
        fwrite(tmp, sizeof(char), strlen(tmp), deshake->fp);
    }

    // Turn relative current frame motion into absolute by adding it to the
    // last absolute motion
    t.vector.x += deshake->last.vector.x;
    t.vector.y += deshake->last.vector.y;
    t.angle    += deshake->last.angle;
    t.zoom     += deshake->last.zoom;

    // Shrink motion by 10% to keep things centered in the camera frame
    t.vector.x *= 0.9;
    t.vector.y *= 0.9;
    t.angle    *= 0.9;

    // Store the last absolute motion information
    deshake->last.vector.x = t.vector.x;
    deshake->last.vector.y = t.vector.y;
    deshake->last.angle    = t.angle;
    deshake->last.zoom     = t.zoom;

    // Generate a luma transformation matrix
    avfilter_get_matrix(t.vector.x, t.vector.y, t.angle, 1.0 + t.zoom / 100.0, matrix);

    // Transform the luma plane
    avfilter_transform(in->data[0], out->data[0],
                       in->linesize[0], out->linesize[0],
                       link->w, link->h, matrix,
                       INTERPOLATE_BILINEAR, deshake->edge);

    // Generate a chroma transformation matrix
    // (translation scaled down by the chroma subsampling factors)
    avfilter_get_matrix(t.vector.x / (link->w / CHROMA_WIDTH(link)),
                        t.vector.y / (link->h / CHROMA_HEIGHT(link)),
                        t.angle, 1.0 + t.zoom / 100.0, matrix);

    // Transform the chroma planes
    avfilter_transform(in->data[1], out->data[1],
                       in->linesize[1], out->linesize[1],
                       CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix,
                       INTERPOLATE_BILINEAR, deshake->edge);
    avfilter_transform(in->data[2], out->data[2],
                       in->linesize[2], out->linesize[2],
                       CHROMA_WIDTH(link), CHROMA_HEIGHT(link), matrix,
                       INTERPOLATE_BILINEAR, deshake->edge);

    // Store the current frame as the reference frame for calculating the
    // motion of the next frame
    if (deshake->ref != NULL)
        avfilter_unref_buffer(deshake->ref);

    // Cleanup the old reference frame
    deshake->ref = in;

    // Draw the transformed frame information
    ff_draw_slice(link->dst->outputs[0], 0, link->h, 1);
    return ff_end_frame(link->dst->outputs[0]);
}
/**
 * Swap the field order of an interlaced frame by shifting the picture
 * content one line up (for bff→tff) or down (for tff→bff), duplicating
 * a nearby line of the same field at the edge that runs out of data.
 * Progressive frames, or frames already in the requested order, pass
 * through untouched (their slices were already forwarded by
 * draw_slice()).
 */
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext   *ctx        = inlink->dst;
    FieldOrderContext *fieldorder = ctx->priv;
    AVFilterLink      *outlink    = ctx->outputs[0];

    AVFilterBufferRef *inpicref  = inlink->cur_buf;
    AVFilterBufferRef *outpicref = outlink->out_buf;

    int h, plane, line_step, line_size, line;
    uint8_t *cpy_src, *cpy_dst;

    if (inpicref->video->interlaced &&
        inpicref->video->top_field_first != fieldorder->dst_tff) {
        av_dlog(ctx,
                "picture will move %s one line\n",
                fieldorder->dst_tff ? "up" : "down");
        h = inpicref->video->h;
        for (plane = 0; plane < 4 && inpicref->data[plane]; plane++) {
            line_step = inpicref->linesize[plane];
            line_size = fieldorder->line_size[plane];
            cpy_src = inpicref->data[plane];
            cpy_dst = outpicref->data[plane];
            if (fieldorder->dst_tff) {
                /** Move every line up one line, working from
                 *  the top to the bottom of the frame.
                 *  The original top line is lost.
                 *  The new last line is created as a copy of the
                 *  penultimate line from that field. */
                for (line = 0; line < h; line++) {
                    if (1 + line < outpicref->video->h) {
                        memcpy(cpy_dst, cpy_src + line_step, line_size);
                    } else {
                        /* bottom edge: duplicate the line two rows back
                         * (same field) instead of reading past the frame */
                        memcpy(cpy_dst, cpy_src - line_step - line_step, line_size);
                    }
                    cpy_src += line_step;
                    cpy_dst += line_step;
                }
            } else {
                /** Move every line down one line, working from
                 *  the bottom to the top of the frame.
                 *  The original bottom line is lost.
                 *  The new first line is created as a copy of the
                 *  second line from that field. */
                cpy_src += (h - 1) * line_step;
                cpy_dst += (h - 1) * line_step;
                for (line = h - 1; line >= 0 ; line--) {
                    if (line > 0) {
                        memcpy(cpy_dst, cpy_src - line_step, line_size);
                    } else {
                        /* top edge: duplicate the line two rows ahead
                         * (same field) instead of reading before the frame */
                        memcpy(cpy_dst, cpy_src + line_step + line_step, line_size);
                    }
                    cpy_src -= line_step;
                    cpy_dst -= line_step;
                }
            }
        }
        outpicref->video->top_field_first = fieldorder->dst_tff;
        /* slices were withheld in draw_slice(); emit the whole frame now */
        ff_draw_slice(outlink, 0, h, 1);
    } else {
        av_dlog(ctx,
                "not interlaced or field order already correct\n");
    }

    return ff_end_frame(outlink);
}
/**
 * Transpose (and, depending on trans->dir, flip) a completed frame.
 *
 * For each plane, output pixel (x, y) is read from input position
 * (y, x): 'in' is indexed by x*inlinesize + y*pixstep while 'out'
 * advances one row per y. dir bit 0 mirrors the input vertically, dir
 * bit 1 mirrors the output vertically — both implemented by starting at
 * the last row and negating the stride. Passthrough mode forwards the
 * frame untouched.
 */
static int end_frame(AVFilterLink *inlink)
{
    TransContext *trans = inlink->dst->priv;
    AVFilterBufferRef *inpic  = inlink->cur_buf;
    AVFilterBufferRef *outpic = inlink->dst->outputs[0]->out_buf;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int plane, ret;

    if (trans->passthrough)
        return ff_null_end_frame(inlink);

    for (plane = 0; outpic->data[plane]; plane++) {
        /* chroma planes (1 and 2) may be subsampled */
        int hsub = plane == 1 || plane == 2 ? trans->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? trans->vsub : 0;
        int pixstep = trans->pixsteps[plane];
        int inh  = inpic->video->h>>vsub;
        int outw = outpic->video->w>>hsub;
        int outh = outpic->video->h>>vsub;
        uint8_t *out, *in;
        int outlinesize, inlinesize;
        int x, y;

        out = outpic->data[plane]; outlinesize = outpic->linesize[plane];
        in  = inpic ->data[plane]; inlinesize  = inpic ->linesize[plane];

        /* dir bit 0: read the input bottom-up (vertical flip of source) */
        if (trans->dir&1) {
            in += inpic->linesize[plane] * (inh-1);
            inlinesize *= -1;
        }

        /* dir bit 1: write the output bottom-up (vertical flip of result) */
        if (trans->dir&2) {
            out += outpic->linesize[plane] * (outh-1);
            outlinesize *= -1;
        }

        for (y = 0; y < outh; y++) {
            /* specialized inner loops per pixel size (bytes per pixel) */
            switch (pixstep) {
            case 1:
                for (x = 0; x < outw; x++)
                    out[x] = in[x*inlinesize + y];
                break;
            case 2:
                for (x = 0; x < outw; x++)
                    *((uint16_t *)(out + 2*x)) = *((uint16_t *)(in + x*inlinesize + y*2));
                break;
            case 3:
                for (x = 0; x < outw; x++) {
                    /* 24-bit pixels: no 3-byte integer type, use RB24/WB24 */
                    int32_t v = AV_RB24(in + x*inlinesize + y*3);
                    AV_WB24(out + 3*x, v);
                }
                break;
            case 4:
                for (x = 0; x < outw; x++)
                    *((uint32_t *)(out + 4*x)) = *((uint32_t *)(in + x*inlinesize + y*4));
                break;
            }
            out += outlinesize;
        }
    }

    if ((ret = ff_draw_slice(outlink, 0, outpic->video->h, 1)) < 0 ||
        (ret = ff_end_frame(outlink)) < 0)
        return ret;
    return 0;
}