/// Constructor for the FlowBlur filter.
/// Reads and validates user parameters, verifies that the super and vector
/// clips are mutually consistent, builds the pel-refined "finest" clip when
/// needed, and registers the filter instance with the core.
/// On any failure it sets an error on `out`, releases every node acquired so
/// far, and returns without creating the filter.
static void VS_CC mvflowblurCreate(const VSMap *in, VSMap *out, void *userData, VSCore *core, const VSAPI *vsapi) {
    (void)userData;

    MVFlowBlurData d;
    MVFlowBlurData *data;

    int err;

    d.blur = (float)vsapi->propGetFloat(in, "blur", 0, &err);
    if (err)
        d.blur = 50.0f;

    d.prec = int64ToIntS(vsapi->propGetInt(in, "prec", 0, &err));
    if (err)
        d.prec = 1;

    d.thscd1 = int64ToIntS(vsapi->propGetInt(in, "thscd1", 0, &err));
    if (err)
        d.thscd1 = MV_DEFAULT_SCD1;

    d.thscd2 = int64ToIntS(vsapi->propGetInt(in, "thscd2", 0, &err));
    if (err)
        d.thscd2 = MV_DEFAULT_SCD2;

    d.opt = !!vsapi->propGetInt(in, "opt", 0, &err);
    if (err)
        d.opt = 1;

    if (d.blur < 0.0f || d.blur > 200.0f) {
        vsapi->setError(out, "FlowBlur: blur must be between 0 and 200 % (inclusive).");
        return;
    }

    if (d.prec < 1) {
        vsapi->setError(out, "FlowBlur: prec must be at least 1.");
        return;
    }

    // Map blur percentage (0..200) onto a 0..256 fixed-point factor.
    d.blur256 = (int)(d.blur * 256.0f / 200.0f);

    d.super = vsapi->propGetNode(in, "super", 0, NULL);

    // Fetch frame 0 of the super clip: mv.Super stores its geometry in the
    // frame properties, which we need before the filter can be configured.
#define ERROR_SIZE 1024
    char errorMsg[ERROR_SIZE] = "FlowBlur: failed to retrieve first frame from super clip. Error message: ";
    size_t errorLen = strlen(errorMsg);
    const VSFrameRef *evil = vsapi->getFrame(0, d.super, errorMsg + errorLen, ERROR_SIZE - errorLen);
#undef ERROR_SIZE

    if (!evil) {
        vsapi->setError(out, errorMsg);
        vsapi->freeNode(d.super);
        return;
    }

    const VSMap *props = vsapi->getFramePropsRO(evil);
    int evil_err[3];
    int nHeightS = int64ToIntS(vsapi->propGetInt(props, "Super_height", 0, &evil_err[0]));
    d.nSuperHPad = int64ToIntS(vsapi->propGetInt(props, "Super_hpad", 0, &evil_err[1]));
    int nSuperPel = int64ToIntS(vsapi->propGetInt(props, "Super_pel", 0, &evil_err[2]));
    vsapi->freeFrame(evil);

    // BUG FIX: the loop previously checked only evil_err[0..1], so a missing
    // Super_pel property went undetected and nSuperPel silently became 0.
    // All three retrieved properties must be validated.
    for (int i = 0; i < 3; i++)
        if (evil_err[i]) {
            vsapi->setError(out, "FlowBlur: required properties not found in first frame of super clip. Maybe clip didn't come from mv.Super? Was the first frame trimmed away?");
            vsapi->freeNode(d.super);
            return;
        }

    d.mvbw = vsapi->propGetNode(in, "mvbw", 0, NULL);
    d.mvfw = vsapi->propGetNode(in, "mvfw", 0, NULL);

    // The helpers below append to `error` on failure; a non-empty buffer
    // afterwards means at least one of them rejected its input.
#define ERROR_SIZE 512
    char error[ERROR_SIZE + 1] = { 0 };
    const char *filter_name = "FlowBlur";

    adataFromVectorClip(&d.mvbw_data, d.mvbw, filter_name, "mvbw", vsapi, error, ERROR_SIZE);
    adataFromVectorClip(&d.mvfw_data, d.mvfw, filter_name, "mvfw", vsapi, error, ERROR_SIZE);

    scaleThSCD(&d.thscd1, &d.thscd2, &d.mvbw_data, filter_name, error, ERROR_SIZE);

    adataCheckSimilarity(&d.mvbw_data, &d.mvfw_data, filter_name, "mvbw", "mvfw", error, ERROR_SIZE);
#undef ERROR_SIZE

    if (error[0]) {
        vsapi->setError(out, error);
        vsapi->freeNode(d.super);
        vsapi->freeNode(d.mvfw);
        vsapi->freeNode(d.mvbw);
        return;
    }

    if (d.mvbw_data.nDeltaFrame <= 0 || d.mvfw_data.nDeltaFrame <= 0) {
        vsapi->setError(out, "FlowBlur: cannot use motion vectors with absolute frame references.");
        vsapi->freeNode(d.super);
        vsapi->freeNode(d.mvfw);
        vsapi->freeNode(d.mvbw);
        return;
    }

    // XXX Alternatively, use both clips' delta as offsets in GetFrame.
    if (d.mvfw_data.nDeltaFrame != d.mvbw_data.nDeltaFrame) {
        vsapi->setError(out, "FlowBlur: mvbw and mvfw must be generated with the same delta.");
        vsapi->freeNode(d.super);
        vsapi->freeNode(d.mvfw);
        vsapi->freeNode(d.mvbw);
        return;
    }

    // Make sure the motion vector clips are correct: mvbw must be backward
    // (isb=True) and mvfw forward (isb=False).
    if (!d.mvbw_data.isBackward || d.mvfw_data.isBackward) {
        if (!d.mvbw_data.isBackward)
            vsapi->setError(out, "FlowBlur: mvbw must be generated with isb=True.");
        else
            vsapi->setError(out, "FlowBlur: mvfw must be generated with isb=False.");
        vsapi->freeNode(d.super);
        vsapi->freeNode(d.mvfw);
        vsapi->freeNode(d.mvbw);
        return;
    }

    if (d.mvbw_data.nPel == 1)
        d.finest = vsapi->cloneNodeRef(d.super); // v2.0.9.1
    else {
        // pel > 1: build the refined clip by invoking mv.Finest on the super
        // clip, then wrap it in std.Cache since invoke() output is uncached.
        VSPlugin *mvtoolsPlugin = vsapi->getPluginById("com.nodame.mvtools", core);
        VSPlugin *stdPlugin = vsapi->getPluginById("com.vapoursynth.std", core);

        VSMap *args = vsapi->createMap();
        vsapi->propSetNode(args, "super", d.super, paReplace);
        vsapi->propSetInt(args, "opt", d.opt, paReplace);
        VSMap *ret = vsapi->invoke(mvtoolsPlugin, "Finest", args);
        if (vsapi->getError(ret)) {
#define ERROR_SIZE 512
            char error_msg[ERROR_SIZE + 1] = { 0 };
            snprintf(error_msg, ERROR_SIZE, "FlowBlur: %s", vsapi->getError(ret));
#undef ERROR_SIZE
            vsapi->setError(out, error_msg);

            vsapi->freeNode(d.super);
            vsapi->freeNode(d.mvfw);
            vsapi->freeNode(d.mvbw);
            vsapi->freeMap(args);
            vsapi->freeMap(ret);
            return;
        }
        d.finest = vsapi->propGetNode(ret, "clip", 0, NULL);
        vsapi->freeMap(ret);

        vsapi->clearMap(args);
        vsapi->propSetNode(args, "clip", d.finest, paReplace);
        // propSetNode added its own reference; drop ours before re-invoking.
        vsapi->freeNode(d.finest);
        ret = vsapi->invoke(stdPlugin, "Cache", args);
        vsapi->freeMap(args);
        if (vsapi->getError(ret)) {
#define ERROR_SIZE 512
            char error_msg[ERROR_SIZE + 1] = { 0 };
            snprintf(error_msg, ERROR_SIZE, "FlowBlur: %s", vsapi->getError(ret));
#undef ERROR_SIZE
            vsapi->setError(out, error_msg);

            vsapi->freeNode(d.super);
            vsapi->freeNode(d.mvfw);
            vsapi->freeNode(d.mvbw);
            vsapi->freeMap(ret);
            return;
        }
        d.finest = vsapi->propGetNode(ret, "clip", 0, NULL);
        vsapi->freeMap(ret);
    }

    d.node = vsapi->propGetNode(in, "clip", 0, 0);
    d.vi = vsapi->getVideoInfo(d.node);

    const VSVideoInfo *supervi = vsapi->getVideoInfo(d.super);
    int nSuperWidth = supervi->width;

    if (d.mvbw_data.nHeight != nHeightS || d.mvbw_data.nWidth != nSuperWidth - d.nSuperHPad * 2 || d.mvbw_data.nPel != nSuperPel) {
        vsapi->setError(out, "FlowBlur: wrong source or super clip frame size.");
        vsapi->freeNode(d.finest);
        vsapi->freeNode(d.super);
        vsapi->freeNode(d.mvfw);
        vsapi->freeNode(d.mvbw);
        vsapi->freeNode(d.node);
        return;
    }

    if (!isConstantFormat(d.vi) || d.vi->format->bitsPerSample > 16 || d.vi->format->sampleType != stInteger || d.vi->format->subSamplingW > 1 || d.vi->format->subSamplingH > 1 || (d.vi->format->colorFamily != cmYUV && d.vi->format->colorFamily != cmGray)) {
        vsapi->setError(out, "FlowBlur: input clip must be GRAY, 420, 422, 440, or 444, up to 16 bits, with constant dimensions.");
        vsapi->freeNode(d.super);
        vsapi->freeNode(d.finest);
        vsapi->freeNode(d.mvfw);
        vsapi->freeNode(d.mvbw);
        vsapi->freeNode(d.node);
        return;
    }

    // Chroma plane dimensions derived from the luma geometry and the
    // subsampling ratios carried in the vector clip's analysis data.
    d.nHeightUV = d.mvbw_data.nHeight / d.mvbw_data.yRatioUV;
    d.nWidthUV = d.mvbw_data.nWidth / d.mvbw_data.xRatioUV;
    d.nHPaddingUV = d.mvbw_data.nHPadding / d.mvbw_data.xRatioUV;
    //d.nVPaddingUV = d.mvbw_data.nHPadding / d.mvbw_data.yRatioUV; // original looks wrong
    d.nVPaddingUV = d.mvbw_data.nVPadding / d.mvbw_data.yRatioUV;

    d.VPitchY = d.mvbw_data.nWidth;
    d.VPitchUV = d.nWidthUV;

    simpleInit(&d.upsizer, d.mvbw_data.nWidth, d.mvbw_data.nHeight, d.mvbw_data.nBlkX, d.mvbw_data.nBlkY, d.opt);
    if (d.vi->format->colorFamily != cmGray)
        simpleInit(&d.upsizerUV, d.nWidthUV, d.nHeightUV, d.mvbw_data.nBlkX, d.mvbw_data.nBlkY, d.opt);

    data = (MVFlowBlurData *)malloc(sizeof(d));
    // BUG FIX: the malloc result was previously dereferenced unchecked.
    // NOTE(review): the upsizer(s) initialised above are not torn down here;
    // confirm whether simpleInit allocates and needs a matching release.
    if (!data) {
        vsapi->setError(out, "FlowBlur: malloc failed.");
        vsapi->freeNode(d.super);
        vsapi->freeNode(d.finest);
        vsapi->freeNode(d.mvfw);
        vsapi->freeNode(d.mvbw);
        vsapi->freeNode(d.node);
        return;
    }
    *data = d;

    vsapi->createFilter(in, out, "FlowBlur", mvflowblurInit, mvflowblurGetFrame, mvflowblurFree, fmParallel, 0, data, core);
}
/// Constructor for the Mask filter.
/// Reads and validates user parameters, extracts the analysis data from the
/// vector clip, precomputes the mask normalisation factors and block-grid
/// dimensions, and registers the filter instance with the core.
/// On any failure it sets an error on `out`, releases every node acquired so
/// far, and returns without creating the filter.
static void VS_CC mvmaskCreate(const VSMap *in, VSMap *out, void *userData, VSCore *core, const VSAPI *vsapi) {
    (void)userData;

    MVMaskData d;
    MVMaskData *data;

    int err;

    d.ml = (float)vsapi->propGetFloat(in, "ml", 0, &err);
    if (err)
        d.ml = 100.0f;

    d.fGamma = (float)vsapi->propGetFloat(in, "gamma", 0, &err);
    if (err)
        d.fGamma = 1.0f;

    // kind and ysc default to 0 when absent (propGetInt returns 0 on error).
    d.kind = int64ToIntS(vsapi->propGetInt(in, "kind", 0, &err));

    double time = vsapi->propGetFloat(in, "time", 0, &err);
    if (err)
        time = 100.0;

    d.nSceneChangeValue = int64ToIntS(vsapi->propGetInt(in, "ysc", 0, &err));

    // BUG FIX: this assignment was missing the int64ToIntS narrowing used by
    // every other integer property here (and by thscd1 in the other filters),
    // leaving an implicit lossy int64 -> int truncation.
    d.thscd1 = int64ToIntS(vsapi->propGetInt(in, "thscd1", 0, &err));
    if (err)
        d.thscd1 = MV_DEFAULT_SCD1;

    d.thscd2 = int64ToIntS(vsapi->propGetInt(in, "thscd2", 0, &err));
    if (err)
        d.thscd2 = MV_DEFAULT_SCD2;

    d.opt = !!vsapi->propGetInt(in, "opt", 0, &err);
    if (err)
        d.opt = 1;

    if (d.fGamma < 0.0f) {
        vsapi->setError(out, "Mask: gamma must not be negative.");
        return;
    }

    if (d.kind < 0 || d.kind > 5) {
        vsapi->setError(out, "Mask: kind must be 0, 1, 2, 3, 4, or 5."); // fixed typo: "must 0"
        return;
    }

    if (time < 0.0 || time > 100.0) {
        vsapi->setError(out, "Mask: time must be between 0.0 and 100.0 (inclusive).");
        return;
    }

    if (d.nSceneChangeValue < 0 || d.nSceneChangeValue > 255) {
        vsapi->setError(out, "Mask: ysc must be between 0 and 255 (inclusive).");
        return;
    }

    d.vectors = vsapi->propGetNode(in, "vectors", 0, NULL);

    // The helpers below append to `error` on failure; a non-empty buffer
    // afterwards means at least one of them rejected its input.
#define ERROR_SIZE 512
    char error[ERROR_SIZE + 1] = { 0 };
    const char *filter_name = "Mask";

    adataFromVectorClip(&d.vectors_data, d.vectors, filter_name, "vectors", vsapi, error, ERROR_SIZE);

    scaleThSCD(&d.thscd1, &d.thscd2, &d.vectors_data, filter_name, error, ERROR_SIZE);
#undef ERROR_SIZE

    if (error[0]) {
        vsapi->setError(out, error);
        vsapi->freeNode(d.vectors);
        return;
    }

    // NOTE(review): ml is not validated; ml == 0 yields an infinite
    // normalisation factor below (IEEE float, not UB) — confirm intended.
    d.fMaskNormFactor = 1.0f / d.ml; // Fizick
    d.fMaskNormFactor2 = d.fMaskNormFactor * d.fMaskNormFactor;

    d.fHalfGamma = d.fGamma * 0.5f;

    // Block-grid extent in pixels, accounting for block overlap.
    d.nWidthB = d.vectors_data.nBlkX * (d.vectors_data.nBlkSizeX - d.vectors_data.nOverlapX) + d.vectors_data.nOverlapX;
    d.nHeightB = d.vectors_data.nBlkY * (d.vectors_data.nBlkSizeY - d.vectors_data.nOverlapY) + d.vectors_data.nOverlapY;

    d.nHeightUV = d.vectors_data.nHeight / d.vectors_data.yRatioUV;
    d.nWidthUV = d.vectors_data.nWidth / d.vectors_data.xRatioUV;
    d.nHeightBUV = d.nHeightB / d.vectors_data.yRatioUV;
    d.nWidthBUV = d.nWidthB / d.vectors_data.xRatioUV;

    d.node = vsapi->propGetNode(in, "clip", 0, NULL);
    d.vi = *vsapi->getVideoInfo(d.node);

    if (!isConstantFormat(&d.vi) || d.vi.format->bitsPerSample > 8 || d.vi.format->subSamplingW > 1 || d.vi.format->subSamplingH > 1 || (d.vi.format->colorFamily != cmYUV && d.vi.format->colorFamily != cmGray)) {
        vsapi->setError(out, "Mask: input clip must be GRAY8, YUV420P8, YUV422P8, YUV440P8, or YUV444P8, with constant dimensions.");
        vsapi->freeNode(d.node);
        vsapi->freeNode(d.vectors);
        return;
    }

    // The mask is always produced with chroma planes, so promote GRAY input
    // to a YUV444P8 output format.
    if (d.vi.format->colorFamily == cmGray)
        d.vi.format = vsapi->getFormatPreset(pfYUV444P8, core);

    simpleInit(&d.upsizer, d.nWidthB, d.nHeightB, d.vectors_data.nBlkX, d.vectors_data.nBlkY, d.opt);
    simpleInit(&d.upsizerUV, d.nWidthBUV, d.nHeightBUV, d.vectors_data.nBlkX, d.vectors_data.nBlkY, d.opt);

    // Map time percentage (0..100) onto a 0..256 fixed-point factor.
    d.time256 = (int)(time * 256 / 100);

    data = (MVMaskData *)malloc(sizeof(d));
    // BUG FIX: the malloc result was previously dereferenced unchecked.
    // NOTE(review): the upsizers initialised above are not torn down here;
    // confirm whether simpleInit allocates and needs a matching release.
    if (!data) {
        vsapi->setError(out, "Mask: malloc failed.");
        vsapi->freeNode(d.node);
        vsapi->freeNode(d.vectors);
        return;
    }
    *data = d;

    vsapi->createFilter(in, out, "Mask", mvmaskInit, mvmaskGetFrame, mvmaskFree, fmParallel, 0, data, core);
}