Code example #1
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
{
    AVFilterContext *ctx = inlink->dst;
    FilterData *fd = ctx->priv;
    TransformData* td = &(fd->td);

    AVFilterLink *outlink = inlink->dst->outputs[0];
    //const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    //int hsub0 = desc->log2_chroma_w;
    //int vsub0 = desc->log2_chroma_h;
    int direct = 0;
    AVFilterBufferRef *out;
    VSFrame inframe;
    VSFrame outframe;
    int plane;

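    /* write in place when the input buffer is writable; otherwise allocate a
     * separate output buffer and copy the frame properties over */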
    if (in->perms & AV_PERM_WRITE) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
        if (!out) {
            avfilter_unref_bufferp(&in);
            return AVERROR(ENOMEM);
        }
        avfilter_copy_buffer_ref_props(out, in);
    }

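    /* expose the input and output planes to vid.stab through VSFrame wrappers */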
    for(plane=0; plane < td->fiSrc.planes; plane++){
        inframe.data[plane] = in->data[plane];
        inframe.linesize[plane] = in->linesize[plane];
    }
    for(plane=0; plane < td->fiDest.planes; plane++){
        outframe.data[plane] = out->data[plane];
        outframe.linesize[plane] = out->linesize[plane];
    }
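    /* apply the next transform: packed (RGB) formats take the RGB path,
     * planar formats the YUV path */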
    transformPrepare(td, &inframe, &outframe);

    if (fd->td.fiSrc.pFormat > PF_PACKED) {
        transformRGB(td, getNextTransform(td, &fd->trans));
    } else {
        transformYUV(td, getNextTransform(td, &fd->trans));
    }
    transformFinish(td);

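    /* the input is only freed when a separate output buffer was allocated */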
    if (!direct)
        avfilter_unref_bufferp(&in);

    return ff_filter_frame(outlink, out);
}
Code example #2
static int deshake_filter_video(TCModuleInstance *self,
				vframe_list_t *frame)
{
  DeshakeData *sd = NULL;

  TC_MODULE_SELF_CHECK(self, "filter_video");
  TC_MODULE_SELF_CHECK(frame, "filter_video");

  sd = self->userdata;
  MotionDetect* md = &(sd->md);
  TransformData* td = &(sd->td);
  LocalMotions localmotions;
  Transform motion;
  VSFrame vsFrame;
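  /* wrap the transcode video buffer in a vid.stab VSFrame */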
  fillFrameFromBuffer(&vsFrame,frame->video_buf, &td->fiSrc);

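  /* detect local motion vectors in the current frame */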
  if(motionDetection(md, &localmotions, &vsFrame)!= VS_OK){
    tc_log_error(MOD_NAME, "motion detection failed");
    return TC_ERROR;
  }

  /* write the detected motions to the file; a failed write is only logged so
   * that the transform is still derived from the local motions */
  if (writeToFile(md, sd->f, &localmotions) != VS_OK)
    tc_log_error(MOD_NAME, "cannot write local motions to file");
  motion = simpleMotionsToTransform(td, &localmotions);
  vs_vector_del(&localmotions);

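  /* source and destination are the same frame, so the transform happens in place */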
  transformPrepare(td, &vsFrame, &vsFrame);

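  /* smooth the estimated camera motion with a low-pass filter */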
  Transform t = lowPassTransforms(td, &sd->avg, &motion);
  /* tc_log_error(MOD_NAME, "Trans: det: %f %f %f \n\t\t act: %f %f %f %f",  */
  /* 	       motion.x, motion.y, motion.alpha, */
  /* 	       t.x, t.y, t.alpha, t.zoom); */

  if (sd->vob->im_v_codec == CODEC_RGB) {
    transformRGB(td, t);
  } else if (sd->vob->im_v_codec == CODEC_YUV) {
    transformYUV(td, t);
  } else {
    tc_log_error(MOD_NAME, "unsupported codec: %i", sd->vob->im_v_codec);
    return TC_ERROR;
  }
  transformFinish(td);
  return TC_OK;
}