Example #1
PVideoFrame __stdcall ShapeMask::GetFrame(int n, IScriptEnvironment* env) {
	int colorspace;

	if (vi.IsRGB24())      colorspace = RGB24;
	else if (vi.IsRGB32()) colorspace = RGB32;
	else if (vi.IsYUY2())  colorspace = YUV2;
	else if (vi.IsYV12())  colorspace = YV12;
	else raiseError(env, "Unsupported color space, must be one of RGB24, RGB32, YUV2 or YV12");

	PClip srcClip = toGrayScale(env, child);
	PVideoFrame src = srcClip->GetFrame(n, env);
	PVideoFrame dst = env->NewVideoFrame(vi);

	const uchar* srcp = src->GetReadPtr();
	const int src_pitch = src->GetPitch();
	const int bpp = vi.BitsPerPixel();

	uchar* retp;

	// No change to the source pixels in the process steps, so ok to cast to non-const
	// returns a 1 channel gray scale image which needs to be converted to whatever format the source clip is in.
	retp = process_frame((uchar*)srcp, vi.width, vi.height, src_pitch, colorspace, threshold, minarea, rectonly);

	if (vi.IsPlanar()) copyPlanar(retp, dst, bpp);
	else if (vi.IsYUY2()) copyYUY2(retp, dst);
	else copyRGB(retp, dst, bpp);

	delete retp;
	return dst;
}
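The helpers copyPlanar/copyYUY2/copyRGB are not shown in this example. Below is a minimal sketch of what a planar copy could look like, assuming process_frame() returns a tightly packed (pitch == width) 8-bit grayscale mask; the helper name, signature, and buffer layout are assumptions for illustration, not the plugin's actual code:

// Hypothetical sketch (needs avisynth.h and <cstring>): copy a packed 8-bit
// mask into the luma plane and neutralize chroma for a planar destination.
static void copyPlanarSketch(const unsigned char* maskp, PVideoFrame& dst,
                             int width, int height) {
	unsigned char* dstY = dst->GetWritePtr(PLANAR_Y);
	const int pitchY = dst->GetPitch(PLANAR_Y);
	for (int y = 0; y < height; ++y)
		memcpy(dstY + y * pitchY, maskp + y * width, width);  // honor the destination pitch

	const int planes[2] = { PLANAR_U, PLANAR_V };
	for (int i = 0; i < 2; ++i) {  // grayscale mask: U = V = 128 (neutral chroma)
		unsigned char* p = dst->GetWritePtr(planes[i]);
		const int pitch = dst->GetPitch(planes[i]);
		for (int y = 0; y < dst->GetHeight(planes[i]); ++y)
			memset(p + y * pitch, 128, dst->GetRowSize(planes[i]));
	}
}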
Example #2
AVSValue __cdecl Create_SoundTouch(AVSValue args, void*, IScriptEnvironment* env) {

  try {	// HIDE DAMN SEH COMPILER BUG!!!

  PClip clip = args[0].AsClip();

  if (!clip->GetVideoInfo().HasAudio())
    env->ThrowError("Input clip does not have audio.");

  if (!(clip->GetVideoInfo().SampleType()&SAMPLE_FLOAT))
    env->ThrowError("Input audio sample format to TimeStretch must be float.");

  if (args[0].AsClip()->GetVideoInfo().AudioChannels() == 2) {
    return new AVSStereoSoundTouch(args[0].AsClip(), 
      (float)args[1].AsFloat(100.0), 
      (float)args[2].AsFloat(100.0), 
      (float)args[3].AsFloat(100.0), 
      &args[4],
      env);
  }
  return new AVSsoundtouch(args[0].AsClip(), 
    (float)args[1].AsFloat(100.0), 
    (float)args[2].AsFloat(100.0), 
    (float)args[3].AsFloat(100.0), 
    &args[4],
    env);

	}
	catch (...) { throw; }
}
Example #3
#include <iostream>     // cout, cerr, endl
#include "avisynth.h"   // CreateScriptEnvironment, IScriptEnvironment, AVSValue, AvisynthError

using namespace std;

int main() {
    try {
        cout << "Creating script environment 1..." << endl;
        IScriptEnvironment* env1 = CreateScriptEnvironment(3);

        cout << "Creating script environment 2..." << endl;
        IScriptEnvironment* env2 = CreateScriptEnvironment(3);

        cout << "Deleting script environment 1..." << endl;
        delete env1;

        cout << "Invoking BlankClip on env 2..." << endl;
        AVSValue ret = env2->Invoke("BlankClip", AVSValue(), 0);
        PClip clp = ret.AsClip();

        cout << "Reading frame 0 from env2..." << endl;
        PVideoFrame frm = clp->GetFrame(0, env2);
    } catch (AvisynthError &e) {
        cerr << "AvisynthError: " << e.msg << endl;
        return -1;
    } catch (...) {
        cerr << "unknown error" << endl;
        return -1;
    }

    return 0;
}
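Note on the delete: this test deliberately destroys one environment and keeps using the other, exercising per-environment state isolation. Deleting the pointer directly matches the older interface requested via CreateScriptEnvironment(3); AviSynth 2.6+ headers also expose IScriptEnvironment::DeleteScriptEnvironment() for the same purpose.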
Example #4
AVSValue __cdecl CreateAreaResize(AVSValue args, void* user_data, IScriptEnvironment* env)
{
    PClip clip = args[0].AsClip();
    int target_width = args[1].AsInt();
    int target_height = args[2].AsInt();

    if (target_width < 1 || target_height < 1) {
        env->ThrowError("AreaResize: target width/height must be 1 or higher.");
    }

    const VideoInfo& vi = clip->GetVideoInfo();
    if (vi.IsYUY2()) {
        env->ThrowError("AreaResize: Unsupported colorspace(YUY2).");
    }
    if (vi.IsYV411() && target_width & 3) {
        env->ThrowError("AreaResize: Target width requires mod 4.");
    }
    if ((vi.IsYV16() || vi.IsYV12()) && target_width & 1) {
        env->ThrowError("AreaResize: Target width requires mod 2.");
    }
    if (vi.IsYV12() && target_height & 1) {
        env->ThrowError("AreaResize: Target height requires mod 2.");
    }
    if (vi.width < target_width || vi.height < target_height) {
        env->ThrowError("AreaResize: This filter is only for down scale.");
    }

    return new AreaResize(clip, target_width, target_height, env);
}
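For context, a factory like CreateAreaResize is normally wired up in the plugin entry point via AddFunction. A minimal sketch, assuming the standard AviSynth 2.5/2.6 C++ plugin interface (the return string is illustrative):

extern "C" __declspec(dllexport) const char* __stdcall
AvisynthPluginInit2(IScriptEnvironment* env) {
	// "cii" = one clip followed by two ints (target_width, target_height)
	env->AddFunction("AreaResize", "cii", CreateAreaResize, 0);
	return "AreaResize";  // arbitrary identification string
}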
Example #5
File: fturn.cpp Project: tp7/fturn
FTurn::FTurn(PClip child, TurnDirection direction, bool chroma, bool mt, IScriptEnvironment* env) 
    : GenericVideoFilter(child), chroma_(chroma), mt_(mt) {
    if (!isSupportedColorspace(vi.pixel_type)) {
        env->ThrowError(getUnsupportedColorspaceMessage());
    }

    if (!(env->GetCPUFlags() & CPUF_SSE2)) {
        env->ThrowError("Sorry, at least SSE2 is required");
    }
    
    int CPUInfo[4]; //eax, ebx, ecx, edx
    __cpuid(CPUInfo, 1);

    #pragma warning(disable: 4800)
    bool ssse3 = CPUInfo[2] & 0x00000200;
    #pragma warning(default: 4800)

    if (direction == TurnDirection::RIGHT || direction == TurnDirection::LEFT) {
        vi.width = child->GetVideoInfo().height;
        vi.height = child->GetVideoInfo().width;

        if (direction == TurnDirection::LEFT) {
            turnFunction_ = turnPlaneLeft;
        } else {
            turnFunction_ = ssse3 ? turnPlaneRightSSSE3 : turnPlaneRightSSE2;
        }
    } else {
        turnFunction_ = ssse3 ? turnPlane180SSSE3 : turnPlane180SSE2;
    }
}
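Side note: the raw __cpuid probe is needed because the AviSynth 2.5 headers expose no SSSE3 bit. When building against headers that define CPUF_SSSE3 (AviSynth 2.6+), the same check could be written as:

bool ssse3 = (env->GetCPUFlags() & CPUF_SSSE3) != 0;  // assumes the host headers define CPUF_SSSE3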
Example #6
	Binary::Binary(BinaryComputePlane *_computeplane, PClip _child, PClip _secondclip, AVSValue _chroma, IScriptEnvironment *env)
	: SupportFilter(_child, env), secondclip(_secondclip), 
	computeplane(*_computeplane),
	computechroma(  *(decipherchromaargument(_chroma, makevector(_child,_secondclip), *_computeplane) ) )
	{
		if(_secondclip->GetVideoInfo().width != vi.width)
			env->ThrowError("LimitedSupport binary filter: widths do not match.");
		if(_secondclip->GetVideoInfo().height != vi.height)
			env->ThrowError("LimitedSupport binary filter: heights do not match.");
	}
Example #7
TMaskCleaner::TMaskCleaner(PClip child, int length, int thresh, IScriptEnvironment* env) : GenericVideoFilter(child), m_length(length), m_thresh(thresh), lookup(nullptr) {
    if (!child->GetVideoInfo().IsYV12()) {
        env->ThrowError("Only YV12 and YV24 is supported!");
    }
    if (length <= 0 || thresh <= 0) {
        env->ThrowError("Invalid arguments!");
    }
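    // Sizing note (an assumption inferred from the arithmetic below): one bit per
    // pixel, i.e. the buffer is used as a packed visited/kept bitmask over the frame.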
    lookup = new BYTE[child->GetVideoInfo().height * child->GetVideoInfo().width / 8];
    m_w = child->GetVideoInfo().width;
}
Example #8
FilteredResizeH::FilteredResizeH( PClip _child, double subrange_left, double subrange_width,
                                  int target_width, ResamplingFunction* func, IScriptEnvironment* env )
  : GenericVideoFilter(_child), tempY(0), tempUV(0),pattern_luma(0),pattern_chroma(0),
  CodeGenerator(false) //Josh: CodeGenerator constructor takes arg x64
{
	try {	// HIDE DAMN SEH COMPILER BUG!!!
  pattern_luma = pattern_chroma = (int *)0;
  tempUV = tempY = 0;

  original_width = _child->GetVideoInfo().width;

  if (target_width<=0)
    env->ThrowError("Resize: Width must be greater than 0.");

  if (vi.IsYUV())
  {
    if ((target_width&1) && (vi.IsYUY2()))
      env->ThrowError("Resize: YUY2 width must be even");
    if ((target_width&1) && (vi.IsYV12()))
      env->ThrowError("Resize: YV12 width must be even.");

    tempY = (BYTE*) _aligned_malloc(original_width*2+4+32, 64);   // aligned for Athlon cache line
    tempUV = (BYTE*) _aligned_malloc(original_width*4+8+32, 64);  // aligned for Athlon cache line

    if (vi.IsYV12()) {
      pattern_chroma = GetResamplingPatternYUV( vi.width>>1, subrange_left/2.0, subrange_width/2.0,
        target_width>>1, func, true, tempY, env );
    } else {
      pattern_chroma = GetResamplingPatternYUV( vi.width>>1, subrange_left/2.0, subrange_width/2.0,
        target_width>>1, func, false, tempUV, env );
    }
    pattern_luma = GetResamplingPatternYUV(vi.width, subrange_left, subrange_width, target_width, func, true, tempY, env);
  }
Example #9
int AvxContext::OutputAudio() {
	FILE *sink;
	void *writeBuffer = NULL;
	sighandler_t old_sigpipe = signal(SIGPIPE, SIG_IGN);

	if (launchMPlayer) {
		char command[1024];
		if (MPlayerCommandAudio(command))
			return -1;
		AVXLOG_INFO("MPlayer command line: %s", command);

		sink = popen(command, "w");
		if (!sink) {
			AVXLOG_ERROR("%s", "Error starting mplayer");
			return -1;
		}
	} else {
		sink = stdout;
	}

	#define AUDIO_SAMPLES 1000
	try {
		writeBuffer = malloc(vi.BytesPerAudioSample() * AUDIO_SAMPLES);
		if (!writeBuffer) {
			AVXLOG_ERROR("%s", "Unable to allocate memory");
			goto fail;
		}
		for (__int64 i = 0; i < vi.num_audio_samples; i += AUDIO_SAMPLES) {
			if (launchMPlayer && (feof(sink) || ferror(sink))) {
				AVXLOG_ERROR("%s", "mplayer process exited");
				break;
			}
			int read_samples;
			if (vi.num_audio_samples - AUDIO_SAMPLES < i)
				read_samples = vi.num_audio_samples - i;
			else
				read_samples = AUDIO_SAMPLES;
			clip->GetAudio(writeBuffer, i, read_samples, avx_library.env);
			fwrite(writeBuffer, vi.BytesPerAudioSample(), read_samples, sink);
		}
	} catch (AvisynthError &e) {
		AVXLOG_ERROR("AvisynthError: %s", e.msg);
		goto fail;
	}
	#undef AUDIO_SAMPLES

	free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return 0;

fail:
	if (writeBuffer)
		free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return -1;
}
Example #10
PClip AlignPlanar::Create(PClip clip) 
{
  if (!clip->GetVideoInfo().IsPlanar()) {  // If not planar, already ok.
    return clip;
  }
  else 
    return new AlignPlanar(clip);
}
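A typical use of this factory idiom is to wrap a freshly constructed planar filter so callers get row alignment for free; a sketch with a hypothetical filter class:

PClip filtered = AlignPlanar::Create(new SomePlanarFilter(child, env));  // SomePlanarFilter is hypothetical; non-planar clips pass through unchanged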
Example #11
MVDegrainMulti::MVDegrainMulti(PClip _child, PClip mvMulti, int _RefFrames, int _thSAD, int _thSADC, int _YUVplanes, int _nLimit,
					          PClip _pelclip, int _nIdx, int _nSCD1, int _nSCD2, bool _mmx, bool _isse, int _MaxThreads,
                              int _PreFetch, int _SadMode, IScriptEnvironment* env) :
			    MVDegrainBase(_child, _RefFrames, _YUVplanes, _nLimit, _pelclip, _nIdx, _mmx, _isse, env, mvMulti, 
                              "MVDegrainMulti", 0, _MaxThreads, _PreFetch, _SadMode), RefFrames(_RefFrames)
{
    if (RefFrames<1 || RefFrames>32) env->ThrowError("MVDegrainMulti: refframes must be >=1 and <=32");

    // get the true number of reference frames
    VideoInfo mvMultivi=mvMulti->GetVideoInfo();
    unsigned int RefFramesAvailable=mvMultivi.height/2;

    // if refframes is greater than MVAnalyseMulti height then limit to height
    if (RefFramesAvailable<RefFrames) {
        RefFrames=RefFramesAvailable;
        UpdateNumRefFrames(RefFrames, env);
    }

    // PreFetch max 21 since 21*3=63 and 64 is max threads at one time
    if (_PreFetch<1 || _PreFetch>21) env->ThrowError("MVDegrainMulti: PreFetch must be >=1 and <=21");

    if (_PreFetch*RefFrames>32) env->ThrowError("MVDegrainMulti: PreFetch*RefFrames<=32");

    // initialize MVClip's which are in order BX, ..., B3, B2, B1, F1, F2, F3, ..., FX in mvMulti
    for (unsigned int PreFetchNum=0; PreFetchNum<static_cast<unsigned int>(_PreFetch); ++PreFetchNum) {
        if (RefFrames<RefFramesAvailable) {
            // we are taking a subset of the mvMulti clip
            for(unsigned int RefNum=0; RefNum<RefFrames; ++RefNum) {
                pmvClipF[PreFetchNum][RefNum]=new MVClip(mvMulti, _nSCD1, _nSCD2, env, true, RefFramesAvailable+RefNum);  
                pmvClipB[PreFetchNum][RefNum]=new MVClip(mvMulti, _nSCD1, _nSCD2, env, true, RefFramesAvailable-RefNum-1);   
            }               
        }
        else {
            // we are taking the full mvMulti clip
            for(unsigned int RefNum=0; RefNum<RefFrames; ++RefNum) {
                pmvClipF[PreFetchNum][RefNum]=new MVClip(mvMulti, _nSCD1, _nSCD2, env, true, RefFrames+RefNum);  
                pmvClipB[PreFetchNum][RefNum]=new MVClip(mvMulti, _nSCD1, _nSCD2, env, true, RefFrames-RefNum-1);   
            }
        }
    }

    // check similarities
    CheckSimilarity(*pmvClipF[0][0], "mvMulti", env); // only need to check one since they are grouped together

    // normalize thSAD
    thSAD  = _thSAD*pmvClipB[0][0]->GetThSCD1()/_nSCD1; // normalize to block SAD
    thSADC = _thSADC*pmvClipB[0][0]->GetThSCD1()/_nSCD1; // chroma

    // find the maximum extent
    unsigned int MaxDelta=static_cast<unsigned int>(pmvClipF[0][RefFrames-1]->GetDeltaFrame());
    if (static_cast<unsigned int>(pmvClipB[0][RefFrames-1]->GetDeltaFrame())>MaxDelta)
        MaxDelta=static_cast<unsigned int>(pmvClipB[0][RefFrames-1]->GetDeltaFrame());

    // numframes 2*MaxDelta+1, i.e. to cover all possible frames in sliding window
    mvCore->AddFrames(nIdx, (2*MaxDelta)*_PreFetch+1, pmvClipB[0][0]->GetLevelCount(), nWidth, nHeight, nPel, nHPadding, nVPadding, 
                      YUVPLANES, _isse, yRatioUV);
}
Example #12
	AdjustedColorDifference::AdjustedColorDifference(IScriptEnvironment* env, PClip input, double factor, PClip subtrahend /* optional */) :
		GenericVideoFilter(input), m_factor(factor), m_subtrahend(subtrahend)
	{
		if (!vi.IsRGB32())
			env->ThrowError("plugin supports only RGB32 input");
		if (subtrahend != nullptr) {
			auto svi = subtrahend->GetVideoInfo();
			CheckVideoInfo(env, vi, svi);
		}
	}
Example #13
AVSValue __cdecl StillImage::CreateElements(AVSValue args, void* user_data, IScriptEnvironment* env)
{
    Q_UNUSED(user_data)
    const PClip background = args[0].AsClip();
    const VideoInfo backgroundVI = background->GetVideoInfo();
    const AVSValue &elementValues = args[1];
    QStringList elements;
    for (int i = 0; i < elementValues.ArraySize(); ++i) {
        const QLatin1String element(elementValues[i].AsString());
        if (!Filters::elementAvailable(element))
            env->ThrowError("QtAviSynthElements: Invalid element '%s'.", element.latin1());
        elements.append(element);
    }
    QImage image(backgroundVI.width, backgroundVI.height, QImage::Format_ARGB32);
    image.fill(Tools::transparentColor);
    QPainter p(&image);
    Filters::paintElements(&p, elements, image.rect());
    const PClip elementsClip = new StillImage(backgroundVI, image, env);
    return new RgbOverlay(background, elementsClip, env);
}
Example #14
AVSValue __cdecl Create_AutoTrace(AVSValue args, void* user_data, IScriptEnvironment* env) {
	PClip clip = args[0].AsClip();
	const VideoInfo& vi = clip->GetVideoInfo();
	if (vi.IsRGB24()) {
		at_fitting_opts_type* fitting_opts = at_fitting_opts_new();
		// Setting fitting opts based on input
		fitting_opts->color_count = args[3].AsInt(0);
		int destWidth = args[1].AsInt(0);
		int destHeight = args[2].AsInt(0);
		// If the inputs are left off entirely (or 0 or negative), then use the
		// input size. If either one is left off (or 0 or negative), then
		// determine that one based on preserving the aspect ratio of the
		// given value.
		if (destWidth <= 0) {
			if (destHeight <= 0) {
				destWidth = vi.width;
				destHeight = vi.height;
			} else {
				// Calculate width based off desired height
				destWidth = destHeight * vi.width / vi.height;
			}
		} else if (destHeight <= 0) {
			// Calculate height based off desired width
			destHeight = destWidth * vi.height / vi.width;
		}
		if (args[4].Defined()) {
			// background_color
			int background = args[4].AsInt();
			if (background != -1) {
				// To match the documentation, ignore -1, even though it would
				// be a valid color. (And arguably makes more sense than
				// 0xFFFFFF, as it has the alpha channel set to full.)
				// Note that R and B are swapped. This is by design - rather
				// than convert a BGR image into an RGB image as AutoTrace
				// expects, we just let the B and R channels be "backwards" as
				// within AutoTrace.
				fitting_opts->background_color = at_color_new(
					(background & 0x0000FF),
					(background & 0x00FF00) >> 8,
					(background & 0xFF0000) >> 16);
			}
Example #15
/// @brief Load audio from an AviSynth clip
/// @param _clip The clip to read audio from
///
void AvisynthAudioProvider::LoadFromClip(AVSValue _clip) {
	AVSValue script;

	// Check if it has audio
	VideoInfo vi = _clip.AsClip()->GetVideoInfo();
	if (!vi.HasAudio()) throw agi::AudioDataNotFoundError("No audio found.", 0);

	IScriptEnvironment *env = avs_wrapper.GetEnv();

	// Convert to one channel
	char buffer[1024];
	strcpy(buffer,lagi_wxString(OPT_GET("Audio/Downmixer")->GetString()).mb_str(csConvLocal));
	script = env->Invoke(buffer, _clip);
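	// Note: Invoke throws IScriptEnvironment::NotFound if the configured
	// downmixer function does not exist; nothing here catches that case.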

	// Convert to 16 bits per sample
	script = env->Invoke("ConvertAudioTo16bit", script);
	vi = script.AsClip()->GetVideoInfo();

	// Convert sample rate
	int setsample = OPT_GET("Provider/Audio/AVS/Sample Rate")->GetInt();
	if (vi.SamplesPerSecond() < 32000) setsample = 44100;
	if (setsample != 0) {
		AVSValue args[2] = { script, setsample };
		script = env->Invoke("ResampleAudio", AVSValue(args,2));
	}

	// Set clip
	PClip tempclip = script.AsClip();
	vi = tempclip->GetVideoInfo();

	// Read properties
	channels = vi.AudioChannels();
	num_samples = vi.num_audio_samples;
	sample_rate = vi.SamplesPerSecond();
	bytes_per_sample = vi.BytesPerAudioSample();
	float_samples = false;

	clip = tempclip;
}
Example #16
AVSValue __cdecl StillImage::CreateSvg(AVSValue args, void* user_data, IScriptEnvironment* env)
{
    Q_UNUSED(user_data)

    const PClip background = args[0].AsClip();
    const VideoInfo backgroundVI = background->GetVideoInfo();
    const QString svgFileName =
        Tools::cleanFileName(QLatin1String(args[1].AsString()));
    const AVSValue &elementValues = args[2];
    QStringList elements;
    for (int i = 0; i < elementValues.ArraySize(); ++i) {
        const QLatin1String element(elementValues[i].AsString());
        Tools::checkSvgAndThrow(svgFileName, element, env);
        elements.append(element);
    }
    QImage image(backgroundVI.width, backgroundVI.height, QImage::Format_ARGB32);
    image.fill(Tools::transparentColor);
    QPainter p(&image);
    Filters::paintSvgElements(&p, svgFileName, elements, image.rect());
    const PClip svgClip = new StillImage(backgroundVI, image, env);
    return new RgbOverlay(background, svgClip, env);
}
Example #17
    ColorQuantize(PClip originClip, int paletteSize,
                  bool useGlobalPalette, FREE_IMAGE_QUANTIZE algorithm,
                  const char *globalPaletteOutputFile, IScriptEnvironment* env)
        : m_origin(originClip)
        , m_paletteSize(paletteSize)
        , m_useGlobalPalette(useGlobalPalette)
        , m_algorithm(algorithm)
        , m_targetVideoInfo(originClip->GetVideoInfo())
        , m_globalPalette(0)
    {
        if (!originClip->GetVideoInfo().IsRGB24()) {
            m_originRgb = env->Invoke("ConvertToRgb24", originClip).AsClip();
            m_targetVideoInfo.pixel_type = VideoInfo::CS_BGR24;
        } else {
            m_originRgb = originClip;
        }

        if (m_useGlobalPalette) {
            FIBITMAP *hugeImage =
                    FreeImage_Allocate(m_targetVideoInfo.width,
                                       m_targetVideoInfo.height * m_targetVideoInfo.num_frames,
                                       24);
            for (int frame = 0; frame < m_targetVideoInfo.num_frames; ++frame) {
                const PVideoFrame videoFrame = m_originRgb->GetFrame(frame, env);
                copyVideoFrameToImage(videoFrame, hugeImage, frame * m_targetVideoInfo.height);
            }
            FIBITMAP *quantizedImage =
                    FreeImage_ColorQuantizeEx(hugeImage, algorithm, m_paletteSize);
            FreeImage_Unload(hugeImage);
            m_globalPalette = new RGBQUAD[m_paletteSize];
            memcpy(m_globalPalette, FreeImage_GetPalette(quantizedImage), m_paletteSize * sizeof(RGBQUAD));
            FreeImage_Unload(quantizedImage);
            if (globalPaletteOutputFile)
                savePaletteImage(globalPaletteOutputFile, m_globalPalette, m_paletteSize);
        }
    }
Example #18
int AvxContext::OpenFile() {
	try {
		AVSValue ret = avx_library.env->Invoke("Import", scriptName);
		if (!ret.IsClip()) {
			AVXLOG_ERROR("%s", "Script did not return a clip");
			return -1;
		}
		clip = ret.AsClip();
		vi = clip->GetVideoInfo();
	} catch (AvisynthError &e) {
		AVXLOG_ERROR("AvisynthError: %s", e.msg);
		return -1;
	}

	return 0;
}
Example #19
                audio_type& audio(void) {
                    const VideoInfo& vi = mv_clip->GetVideoInfo();
                    const caudio_type::info_type info = {
                        vi.HasAudio(),
                        vi.AudioChannels(),
                        caudio_type::bit_depth(vi.sample_type),
                        (vi.sample_type == SAMPLE_FLOAT ? false : true),
                        static_cast<double>(vi.num_audio_samples)
                            / vi.SamplesPerSecond(),
                        vi.SamplesPerSecond(),
                        vi.num_audio_samples,
                        vi.BytesPerAudioSample()
                    };

                    if (mv_audio == NULL) mv_audio =
                        new caudio_type(mv_clip, mv_se.get(), info);
                    return *mv_audio;
                }
Example #20
                /*
                 *  An object of a class cavs_type has a possession of mv_se,
                 *  cvideo_type and caudio_type objects are just allowed to borrow
                 *  mv_se.
                 * */
                video_type& video(void) {
                    const VideoInfo& vi = mv_clip->GetVideoInfo();
                    const cvideo_type::info_type info = {
                        vi.HasVideo(),
                        vi.width,
                        vi.height,
                        static_cast<double>(vi.num_frames) * vi.fps_denominator
                            / vi.fps_numerator,
                        static_cast<double>(vi.fps_numerator) / vi.fps_denominator,
                        vi.fps_numerator,
                        vi.fps_denominator,
                        vi.num_frames,
                        cvideo_type::fourcc(vi.pixel_type),
                        vi.BitsPerPixel(),
                        vi.IsFieldBased(),
                        vi.IsTFF()
                    };

                    if (mv_video == NULL) mv_video =
                        new cvideo_type(mv_clip, mv_se.get(), info);
                    return *mv_video;
                }
Example #21
MVDegrain1::MVDegrain1(
	PClip _child, PClip _super, PClip mvbw, PClip mvfw,
	int _thSAD, int _thSADC, int _YUVplanes, int _nLimit, int _nLimitC,
	int _nSCD1, int _nSCD2, bool _isse, bool _planar, bool _lsb_flag,
	bool mt_flag, IScriptEnvironment* env
)
:	GenericVideoFilter(_child)
,	MVFilter ((! mvfw) ? mvbw : mvfw,  "MDegrain1",    env, (! mvfw) ? 2 : 1, (! mvfw) ? 1 : 0)
,	mvClipF  ((! mvfw) ? mvbw : mvfw,  _nSCD1, _nSCD2, env, (! mvfw) ? 2 : 1, (! mvfw) ? 1 : 0)
,	mvClipB  ((! mvfw) ? mvbw : mvbw,  _nSCD1, _nSCD2, env, (! mvfw) ? 2 : 1, (! mvfw) ? 0 : 0)
,	super (_super)
,	lsb_flag (_lsb_flag)
,	height_lsb_mul ((_lsb_flag) ? 2 : 1)
,	DstShort (0)
,	DstInt (0)
{
	thSAD = _thSAD*mvClipB.GetThSCD1()/_nSCD1; // normalize to block SAD
	thSADC = _thSADC*mvClipB.GetThSCD1()/_nSCD1; // chroma threshold, normalized to block SAD
	YUVplanes = _YUVplanes;
	nLimit = _nLimit;
	nLimitC = _nLimitC;

	isse = _isse;
	planar = _planar;

	CheckSimilarity(mvClipF, "mvfw", env);
	CheckSimilarity(mvClipB, "mvbw", env);

	const ::VideoInfo &	vi_super = _super->GetVideoInfo ();

	// get parameters of prepared super clip - v2.0
	SuperParams64Bits params;
	memcpy(&params, &vi_super.num_audio_samples, 8);
	int nHeightS = params.nHeight;
	int nSuperHPad = params.nHPad;
	int nSuperVPad = params.nVPad;
	int nSuperPel = params.nPel;
	nSuperModeYUV = params.nModeYUV;
	int nSuperLevels = params.nLevels;

	pRefBGOF = new MVGroupOfFrames(nSuperLevels, nWidth, nHeight, nSuperPel, nSuperHPad, nSuperVPad, nSuperModeYUV, isse, yRatioUV, mt_flag);
	pRefFGOF = new MVGroupOfFrames(nSuperLevels, nWidth, nHeight, nSuperPel, nSuperHPad, nSuperVPad, nSuperModeYUV, isse, yRatioUV, mt_flag);
	int nSuperWidth  = vi_super.width;
	int nSuperHeight = vi_super.height;

	if (   nHeight != nHeightS
	    || nHeight != vi.height
	    || nWidth  != nSuperWidth-nSuperHPad*2
	    || nWidth  != vi.width
	    || nPel    != nSuperPel)
	{
		env->ThrowError("MDegrain1 : wrong source or super frame size");
	}


   if ( (pixelType & VideoInfo::CS_YUY2) == VideoInfo::CS_YUY2 && !planar)
   {
		DstPlanes =  new YUY2Planes(nWidth, nHeight * height_lsb_mul);
		SrcPlanes =  new YUY2Planes(nWidth, nHeight);
   }

   dstShortPitch = ((nWidth + 15)/16)*16;
	dstIntPitch = dstShortPitch;
   if (nOverlapX >0 || nOverlapY>0)
   {
		OverWins = new OverlapWindows(nBlkSizeX, nBlkSizeY, nOverlapX, nOverlapY);
		OverWinsUV = new OverlapWindows(nBlkSizeX/2, nBlkSizeY/yRatioUV, nOverlapX/2, nOverlapY/yRatioUV);
		if (lsb_flag)
		{
			DstInt = new int [dstIntPitch * nHeight];
		}
		else
		{
			DstShort = new unsigned short[dstShortPitch*nHeight];
		}
   }

	switch (nBlkSizeX)
      {
      case 32:
      if (nBlkSizeY==16) {          OVERSLUMALSB = OverlapsLsb_C<32,16>; 
		 if (yRatioUV==2) {	        OVERSCHROMALSB = OverlapsLsb_C<16,8>;  }
		 else {	                    OVERSCHROMALSB = OverlapsLsb_C<16,16>; }
      } else if (nBlkSizeY==32) {    OVERSLUMALSB = OverlapsLsb_C<32,32>;
		 if (yRatioUV==2) {	        OVERSCHROMALSB = OverlapsLsb_C<16,16>;		 }
		 else {	                    OVERSCHROMALSB = OverlapsLsb_C<16,32>;		 }
      } break;
      case 16:
      if (nBlkSizeY==16) {          OVERSLUMALSB = OverlapsLsb_C<16,16>; 
		 if (yRatioUV==2) {	        OVERSCHROMALSB = OverlapsLsb_C<8,8>;   }
		 else {	                    OVERSCHROMALSB = OverlapsLsb_C<8,16>;  }
      } else if (nBlkSizeY==8) {    OVERSLUMALSB = OverlapsLsb_C<16,8>;  
		 if (yRatioUV==2) {	        OVERSCHROMALSB = OverlapsLsb_C<8,4>;   }
		 else {	                    OVERSCHROMALSB = OverlapsLsb_C<8,8>;   }
      } else if (nBlkSizeY==2) {    OVERSLUMALSB = OverlapsLsb_C<16,2>;  
		 if (yRatioUV==2) {	        OVERSCHROMALSB = OverlapsLsb_C<8,1>;   }
		 else {	                    OVERSCHROMALSB = OverlapsLsb_C<8,2>;   }
      }
         break;
      case 4:
                                    OVERSLUMALSB = OverlapsLsb_C<4,4>;   
		 if (yRatioUV==2) {			OVERSCHROMALSB = OverlapsLsb_C<2,2>;  }
		 else {			            OVERSCHROMALSB = OverlapsLsb_C<2,4>;  }
         break;
      case 8:
      default:
      if (nBlkSizeY==8) {           OVERSLUMALSB = OverlapsLsb_C<8,8>;   
		 if (yRatioUV==2) {	        OVERSCHROMALSB = OverlapsLsb_C<4,4>;   }
		 else {	                    OVERSCHROMALSB = OverlapsLsb_C<4,8>;   }
      }else if (nBlkSizeY==4) {     OVERSLUMALSB = OverlapsLsb_C<8,4>;   
		 if (yRatioUV==2) {	        OVERSCHROMALSB = OverlapsLsb_C<4,2>;   }
		 else {	                    OVERSCHROMALSB = OverlapsLsb_C<4,4>;   }
      }
      }

   if ( ((env->GetCPUFlags() & CPUF_SSE2) != 0) & isse)
   {
	switch (nBlkSizeX)
      {
      case 32:
      if (nBlkSizeY==16) {          OVERSLUMA = Overlaps32x16_sse2;  DEGRAINLUMA = Degrain1_sse2<32,16>;
		 if (yRatioUV==2) {         OVERSCHROMA = Overlaps16x8_sse2; DEGRAINCHROMA = Degrain1_sse2<16,8>;		 }
		 else {                     OVERSCHROMA = Overlaps16x16_sse2;DEGRAINCHROMA = Degrain1_sse2<16,16>;		 }
      } else if (nBlkSizeY==32) {    OVERSLUMA = Overlaps32x32_sse2;  DEGRAINLUMA = Degrain1_sse2<32,32>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps16x16_sse2; DEGRAINCHROMA = Degrain1_sse2<16,16>;		 }
		 else {	                    OVERSCHROMA = Overlaps16x32_sse2; DEGRAINCHROMA = Degrain1_sse2<16,32>;		 }
      } break;
      case 16:
      if (nBlkSizeY==16) {          OVERSLUMA = Overlaps16x16_sse2; DEGRAINLUMA = Degrain1_sse2<16,16>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps8x8_sse2; DEGRAINCHROMA = Degrain1_sse2<8,8>;		 }
		 else {	                    OVERSCHROMA = Overlaps8x16_sse2;DEGRAINCHROMA = Degrain1_sse2<8,16>;		 }
      } else if (nBlkSizeY==8) {    OVERSLUMA = Overlaps16x8_sse2;  DEGRAINLUMA = Degrain1_sse2<16,8>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps8x4_sse2; DEGRAINCHROMA = Degrain1_sse2<8,4>;		 }
		 else {	                    OVERSCHROMA = Overlaps8x8_sse2; DEGRAINCHROMA = Degrain1_sse2<8,8>;		 }
      } else if (nBlkSizeY==2) {    OVERSLUMA = Overlaps16x2_sse2;  DEGRAINLUMA = Degrain1_sse2<16,2>;
		 if (yRatioUV==2) {         OVERSCHROMA = Overlaps8x1_sse2; DEGRAINCHROMA = Degrain1_sse2<8,1>;		 }
		 else {	                    OVERSCHROMA = Overlaps8x2_sse2; DEGRAINCHROMA = Degrain1_sse2<8,2>;		 }
      }
         break;
      case 4:
                                    OVERSLUMA = Overlaps4x4_sse2;    DEGRAINLUMA = Degrain1_mmx<4,4>;
		 if (yRatioUV==2) {			OVERSCHROMA = Overlaps_C<2,2>;	DEGRAINCHROMA = Degrain1_C<2,2>;		 }
		 else {			            OVERSCHROMA = Overlaps_C<2,4>;    DEGRAINCHROMA = Degrain1_C<2,4>;		 }
         break;
      case 8:
      default:
      if (nBlkSizeY==8) {           OVERSLUMA = Overlaps8x8_sse2;    DEGRAINLUMA = Degrain1_sse2<8,8>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps4x4_sse2;  DEGRAINCHROMA = Degrain1_mmx<4,4>;		 }
		 else {	                    OVERSCHROMA = Overlaps4x8_sse2;  DEGRAINCHROMA = Degrain1_mmx<4,8>;		 }
      }else if (nBlkSizeY==4) {     OVERSLUMA = Overlaps8x4_sse2;	DEGRAINLUMA = Degrain1_sse2<8,4>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps4x2_sse2;	DEGRAINCHROMA = Degrain1_mmx<4,2>;		 }
		 else {	                    OVERSCHROMA = Overlaps4x4_sse2;  DEGRAINCHROMA = Degrain1_mmx<4,4>;		 }
      }
      }
   }
   else if ( isse )
   {
	switch (nBlkSizeX)
      {
      case 32:
      if (nBlkSizeY==16) {          OVERSLUMA = Overlaps32x16_sse2;  DEGRAINLUMA = Degrain1_mmx<32,16>;
		 if (yRatioUV==2) {         OVERSCHROMA = Overlaps16x8_sse2; DEGRAINCHROMA = Degrain1_mmx<16,8>;		 }
		 else {                     OVERSCHROMA = Overlaps16x16_sse2;DEGRAINCHROMA = Degrain1_mmx<16,16>;		 }
      } else if (nBlkSizeY==32) {    OVERSLUMA = Overlaps32x32_sse2;  DEGRAINLUMA = Degrain1_mmx<32,32>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps16x16_sse2; DEGRAINCHROMA = Degrain1_mmx<16,16>;		 }
		 else {	                    OVERSCHROMA = Overlaps16x32_sse2; DEGRAINCHROMA = Degrain1_mmx<16,32>;		 }
      } break;
      case 16:
      if (nBlkSizeY==16) {          OVERSLUMA = Overlaps16x16_sse2; DEGRAINLUMA = Degrain1_mmx<16,16>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps8x8_sse2; DEGRAINCHROMA = Degrain1_mmx<8,8>;		 }
		 else {	                    OVERSCHROMA = Overlaps8x16_sse2;DEGRAINCHROMA = Degrain1_mmx<8,16>;		 }
      } else if (nBlkSizeY==8) {    OVERSLUMA = Overlaps16x8_sse2;  DEGRAINLUMA = Degrain1_mmx<16,8>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps8x4_sse2; DEGRAINCHROMA = Degrain1_mmx<8,4>;		 }
		 else {	                    OVERSCHROMA = Overlaps8x8_sse2; DEGRAINCHROMA = Degrain1_mmx<8,8>;		 }
      } else if (nBlkSizeY==2) {    OVERSLUMA = Overlaps16x2_sse2;  DEGRAINLUMA = Degrain1_mmx<16,2>;
		 if (yRatioUV==2) {         OVERSCHROMA = Overlaps8x1_sse2; DEGRAINCHROMA = Degrain1_mmx<8,1>;		 }
		 else {	                    OVERSCHROMA = Overlaps8x2_sse2; DEGRAINCHROMA = Degrain1_mmx<8,2>;		 }
      }
         break;
      case 4:
                                    OVERSLUMA = Overlaps4x4_sse2;    DEGRAINLUMA = Degrain1_mmx<4,4>;
		 if (yRatioUV==2) {			OVERSCHROMA = Overlaps_C<2,2>;	DEGRAINCHROMA = Degrain1_C<2,2>;		 }
		 else {			            OVERSCHROMA = Overlaps_C<2,4>;    DEGRAINCHROMA = Degrain1_C<2,4>;		 }
         break;
      case 8:
      default:
      if (nBlkSizeY==8) {           OVERSLUMA = Overlaps8x8_sse2;    DEGRAINLUMA = Degrain1_mmx<8,8>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps4x4_sse2;  DEGRAINCHROMA = Degrain1_mmx<4,4>;		 }
		 else {	                    OVERSCHROMA = Overlaps4x8_sse2;  DEGRAINCHROMA = Degrain1_mmx<4,8>;		 }
      }else if (nBlkSizeY==4) {     OVERSLUMA = Overlaps8x4_sse2;	DEGRAINLUMA = Degrain1_mmx<8,4>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps4x2_sse2;	DEGRAINCHROMA = Degrain1_mmx<4,2>;		 }
		 else {	                    OVERSCHROMA = Overlaps4x4_sse2;  DEGRAINCHROMA = Degrain1_mmx<4,4>;		 }
      }
      }
   }
   else
   {
	switch (nBlkSizeX)
      {
      case 32:
      if (nBlkSizeY==16) {          OVERSLUMA = Overlaps_C<32,16>;  DEGRAINLUMA = Degrain1_C<32,16>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps_C<16,8>; DEGRAINCHROMA = Degrain1_C<16,8>;		 }
		 else {	                    OVERSCHROMA = Overlaps_C<16,16>;DEGRAINCHROMA = Degrain1_C<16,16>;		 }
      } else if (nBlkSizeY==32) {    OVERSLUMA = Overlaps_C<32,32>;   DEGRAINLUMA = Degrain1_C<32,32>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps_C<16,16>;  DEGRAINCHROMA = Degrain1_C<16,16>;		 }
		 else {	                    OVERSCHROMA = Overlaps_C<16,32>;  DEGRAINCHROMA = Degrain1_C<16,32>;		 }
      } break;
      case 16:
      if (nBlkSizeY==16) {          OVERSLUMA = Overlaps_C<16,16>;  DEGRAINLUMA = Degrain1_C<16,16>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps_C<8,8>;  DEGRAINCHROMA = Degrain1_C<8,8>;		 }
		 else {	                    OVERSCHROMA = Overlaps_C<8,16>; DEGRAINCHROMA = Degrain1_C<8,16>;		 }
      } else if (nBlkSizeY==8) {    OVERSLUMA = Overlaps_C<16,8>;   DEGRAINLUMA = Degrain1_C<16,8>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps_C<8,4>;  DEGRAINCHROMA = Degrain1_C<8,4>;		 }
		 else {	                    OVERSCHROMA = Overlaps_C<8,8>;  DEGRAINCHROMA = Degrain1_C<8,8>;		 }
      } else if (nBlkSizeY==2) {    OVERSLUMA = Overlaps_C<16,2>;   DEGRAINLUMA = Degrain1_C<16,2>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps_C<8,1>;  DEGRAINCHROMA = Degrain1_C<8,1>;		 }
		 else {	                    OVERSCHROMA = Overlaps_C<8,2>;  DEGRAINCHROMA = Degrain1_C<8,2>;		 }
      }
         break;
      case 4:
                                    OVERSLUMA = Overlaps_C<4,4>;    DEGRAINLUMA = Degrain1_C<4,4>;
		 if (yRatioUV==2) {			OVERSCHROMA = Overlaps_C<2,2>;  DEGRAINCHROMA = Degrain1_C<2,2>;		 }
		 else {			            OVERSCHROMA = Overlaps_C<2,4>;  DEGRAINCHROMA = Degrain1_C<2,4>;		 }
         break;
      case 8:
      default:
      if (nBlkSizeY==8) {           OVERSLUMA = Overlaps_C<8,8>;    DEGRAINLUMA = Degrain1_C<8,8>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps_C<4,4>;  DEGRAINCHROMA = Degrain1_C<4,4>;		 }
		 else {	                    OVERSCHROMA = Overlaps_C<4,8>;  DEGRAINCHROMA = Degrain1_C<4,8>;		 }
      }else if (nBlkSizeY==4) {     OVERSLUMA = Overlaps_C<8,4>;    DEGRAINLUMA = Degrain1_C<8,4>;
		 if (yRatioUV==2) {	        OVERSCHROMA = Overlaps_C<4,2>;  DEGRAINCHROMA = Degrain1_C<4,2>;		 }
		 else {	                    OVERSCHROMA = Overlaps_C<4,4>;  DEGRAINCHROMA = Degrain1_C<4,4>;		 }
      }
      }
   }

	const int		tmp_size = 32 * 32;
	tmpBlock = new BYTE[tmp_size * height_lsb_mul];
	tmpBlockLsb = (lsb_flag) ? (tmpBlock + tmp_size) : 0;

	if (lsb_flag)
	{
		vi.height <<= 1;
	}
}
Example #22
MVFlowInter::MVFlowInter(PClip _child, PClip super, PClip _mvbw, PClip _mvfw,  int _time256, double _ml,
                           bool _blend, int nSCD1, int nSCD2, bool _isse, bool _planar, IScriptEnvironment* env) :
GenericVideoFilter(_child),
MVFilter(_mvfw, "MFlowInter", env),
mvClipB(_mvbw, nSCD1, nSCD2, env),
mvClipF(_mvfw, nSCD1, nSCD2, env)
{
   time256 = _time256;
   ml = _ml;
   isse = _isse;
   planar = _planar;
   blend = _blend;

	if (!mvClipB.IsBackward())
			env->ThrowError("MFlowInter: wrong backward vectors");
	if (mvClipF.IsBackward())
			env->ThrowError("MFlowInter: wrong forward vectors");

   CheckSimilarity(mvClipB, "mvbw", env);
   CheckSimilarity(mvClipF, "mvfw", env);

        SuperParams64Bits params;
        memcpy(&params, &super->GetVideoInfo().num_audio_samples, 8);
        int nHeightS = params.nHeight;
        int nSuperHPad = params.nHPad;
        int nSuperVPad = params.nVPad;
        int nSuperPel = params.nPel;
        int nSuperModeYUV = params.nModeYUV;
        int nSuperLevels = params.nLevels;
        int nSuperWidth = super->GetVideoInfo().width; // really super
        int nSuperHeight = super->GetVideoInfo().height;

        if (nHeight != nHeightS || nWidth != nSuperWidth-nSuperHPad*2)
                env->ThrowError("MFlowInter : wrong super frame clip");

    if (nPel==1)
        finest = super; // v2.0.9.1
    else
    {
    finest = new MVFinest(super, isse, env);
    AVSValue cache_args[1] = { finest };
    finest = env->Invoke("InternalCache", AVSValue(cache_args,1)).AsClip(); // add cache for speed
    }

//   if (nWidth  != vi.width || (nWidth + nHPadding*2)*nPel != finest->GetVideoInfo().width ||
//       nHeight  != vi.height || (nHeight + nVPadding*2)*nPel != finest->GetVideoInfo().height )
//			env->ThrowError("MVFlowInter: wrong source or finest frame size");

	 // may be padded for full frame cover
	 nBlkXP = (nBlkX*(nBlkSizeX - nOverlapX) + nOverlapX < nWidth) ? nBlkX+1 : nBlkX;
	 nBlkYP = (nBlkY*(nBlkSizeY - nOverlapY) + nOverlapY < nHeight) ? nBlkY+1 : nBlkY;
	 nWidthP = nBlkXP*(nBlkSizeX - nOverlapX) + nOverlapX;
	 nHeightP = nBlkYP*(nBlkSizeY - nOverlapY) + nOverlapY;
	 // for YV12
	 nWidthPUV = nWidthP/2;
	 nHeightPUV = nHeightP/yRatioUV;
	 nHeightUV = nHeight/yRatioUV;
	 nWidthUV = nWidth/2;

	 nHPaddingUV = nHPadding/2;
	 nVPaddingUV = nVPadding/yRatioUV;

	 VPitchY = (nWidthP + 15) & (~15);
	 VPitchUV = (nWidthPUV + 15) & (~15);

 	 VXFullYB = new BYTE [nHeightP*VPitchY];
	 VXFullUVB = new BYTE [nHeightPUV*VPitchUV];
 	 VYFullYB = new BYTE [nHeightP*VPitchY];
	 VYFullUVB = new BYTE [nHeightPUV*VPitchUV];

	 VXFullYF = new BYTE [nHeightP*VPitchY];
	 VXFullUVF = new BYTE [nHeightPUV*VPitchUV];
 	 VYFullYF = new BYTE [nHeightP*VPitchY];
	 VYFullUVF = new BYTE [nHeightPUV*VPitchUV];

  	 VXSmallYB = new BYTE [nBlkXP*nBlkYP];
  	 VYSmallYB = new BYTE [nBlkXP*nBlkYP];
	 VXSmallUVB = new BYTE [nBlkXP*nBlkYP];
	 VYSmallUVB = new BYTE [nBlkXP*nBlkYP];

  	 VXSmallYF = new BYTE [nBlkXP*nBlkYP];
  	 VYSmallYF = new BYTE [nBlkXP*nBlkYP];
	 VXSmallUVF = new BYTE [nBlkXP*nBlkYP];
	 VYSmallUVF = new BYTE [nBlkXP*nBlkYP];

 	 VXFullYBB = new BYTE [nHeightP*VPitchY];
	 VXFullUVBB = new BYTE [nHeightPUV*VPitchUV];
 	 VYFullYBB = new BYTE [nHeightP*VPitchY];
	 VYFullUVBB = new BYTE [nHeightPUV*VPitchUV];

	 VXFullYFF = new BYTE [nHeightP*VPitchY];
	 VXFullUVFF = new BYTE [nHeightPUV*VPitchUV];
 	 VYFullYFF = new BYTE [nHeightP*VPitchY];
	 VYFullUVFF = new BYTE [nHeightPUV*VPitchUV];

  	 VXSmallYBB = new BYTE [nBlkXP*nBlkYP];
  	 VYSmallYBB = new BYTE [nBlkXP*nBlkYP];
	 VXSmallUVBB = new BYTE [nBlkXP*nBlkYP];
	 VYSmallUVBB = new BYTE [nBlkXP*nBlkYP];

  	 VXSmallYFF = new BYTE [nBlkXP*nBlkYP];
  	 VYSmallYFF = new BYTE [nBlkXP*nBlkYP];
	 VXSmallUVFF = new BYTE [nBlkXP*nBlkYP];
	 VYSmallUVFF = new BYTE [nBlkXP*nBlkYP];

	 MaskSmallB = new BYTE [nBlkXP*nBlkYP];
	 MaskFullYB = new BYTE [nHeightP*VPitchY];
	 MaskFullUVB = new BYTE [nHeightPUV*VPitchUV];

	 MaskSmallF = new BYTE [nBlkXP*nBlkYP];
	 MaskFullYF = new BYTE [nHeightP*VPitchY];
	 MaskFullUVF = new BYTE [nHeightPUV*VPitchUV];

	 SADMaskSmallB = new BYTE [nBlkXP*nBlkYP];
	 SADMaskSmallF = new BYTE [nBlkXP*nBlkYP];


	 int CPUF_Resize = env->GetCPUFlags();
	 if (!isse) CPUF_Resize = (CPUF_Resize & ~CPUF_INTEGER_SSE) & ~CPUF_SSE2;

	 upsizer = new SimpleResize(nWidthP, nHeightP, nBlkXP, nBlkYP, CPUF_Resize);
	 upsizerUV = new SimpleResize(nWidthPUV, nHeightPUV, nBlkXP, nBlkYP, CPUF_Resize);

	 LUTVB = new int[256];
	 LUTVF = new int[256];
	Create_LUTV(time256, LUTVB, LUTVF);

	if ( (pixelType & VideoInfo::CS_YUY2) == VideoInfo::CS_YUY2 && !planar)
   {
		DstPlanes =  new YUY2Planes(nWidth, nHeight);
   }

}
Example #23
PVideoFrame __stdcall SimpleSample::GetFrame(int n, IScriptEnvironment* env) {
// This is the implementation of the GetFrame function.
// See the header definition for further info.

  PVideoFrame src = child->GetFrame(n, env);
   // Request frame 'n' from the child (source) clip.
  PVideoFrame window = WindowVideo->GetFrame(n, env);
  // Request frame "'n" from the WindowVideo clip
  PVideoFrame dst = env->NewVideoFrame(vi);
   // Construct a frame based on the information of the current frame
   // contained in the "vi" struct.

  /* GstAVSynth: copy timestamp from source to destination buffer
   * without modifying it
   */
  dst->SetTimestamp (src->GetTimestamp ());
  
  const unsigned char* srcp = src->GetReadPtr();
  // Request a Read pointer from the source frame.
  // This will return the position of the upper-left pixel in YUY2 images,
  // and return the lower-left pixel in RGB.
  // RGB images are stored upside-down in memory. 
  // You should still process images from line 0 to height.

  unsigned char* dstp = dst->GetWritePtr();
  // Request a Write pointer from the newly created destination image.
  // You can request a writepointer to images that have just been
  // created by NewVideoFrame. If you receive a frame from PClip->GetFrame(...)
  // you must call env->MakeWritable(&frame) to receive a valid write pointer.
  
  const int dst_pitch = dst->GetPitch();
  // Requests pitch (length of a line) of the destination image.
  // For more information on pitch see: http://www.avisynth.org/index.php?page=WorkingWithImages
  // (short version - pitch is always equal to or greater than width to allow for seriously fast assembly code)

  const int dst_width = dst->GetRowSize();
  // Requests rowsize (number of used bytes in a line).
  // See the link above for more information.

  const int dst_height = dst->GetHeight();
  // Requests the height of the destination image.

  const int src_pitch = src->GetPitch();
  const int src_width = src->GetRowSize();
  const int src_height = src->GetHeight();

  const unsigned char* windowp=window->GetReadPtr();
  const int window_pitch = window->GetPitch();
  const int window_width = window->GetRowSize();
  const int window_height = window->GetHeight();
  // Get info on the Windowed Clip (see src definitions for more information)

  
  int w, h;

  // This version of SimpleSample is intended to show how to utilise information from 2 clips in YUY2
  // colourspace only.  The original V1.6 code has been left in place for all other
  // colourspaces.
  // It is designed purely for clarity and not as good or clever code :-)
  

  if (vi.IsRGB24()) {
    // The code just deals with RGB24 colourspace where each pixel is represented by
    // 3 bytes, Blue, Green and Red.
    // Although this colourspace is the easiest to understand, it is very rarely used because
    // a 3 byte sequence (24bits) cannot be processed easily using normal 32 bit registers.
/*    
    for (h=0; h < src_height;h++) {       // Loop from bottom line to top line.
      for (w = 0; w < src_width; w+=3) {   // Loop from left side of the image to the right side 1 pixel (3 bytes) at a time
        // stepping 3 bytes (a pixel width in RGB24 space)
        
        *(dstp + w) = *(srcp + w);          // Copy each Blue byte from source to destination.
        *(dstp + w + 1) = *(srcp + w + 1);     // Copy Green.
        *(dstp + w + 2) = *(srcp + w + 2);    // Copy Red
      }                              
      srcp = srcp + src_pitch; // Add the pitch (note use of pitch and not width) of one line (in bytes) to the source pointer
      dstp = dstp + dst_pitch; // Add the pitch to the destination pointer.
    }
*/
    env->BitBlt(dst->GetWritePtr(), dst->GetPitch(), src->GetReadPtr(), src->GetPitch(), src->GetRowSize(), src->GetHeight());
    // end copy src to dst
    
    //Now draw a white square in the middle of the frame
    // Normally you'd do this code within the loop above but here it is in a separate loop for clarity;
    dstp = dst->GetWritePtr();  // reset the destination pointer to the bottom, left pixel. (RGB colourspaces only)
    dstp = dstp + (dst_height/2 - SquareSize/2)*dst_pitch;  // move pointer to SquareSize/2 lines from the middle of the frame;
    for (h=0; h < SquareSize;h++) { // only scan 100 lines 
      for (w = dst_width/2 - SquareSize*3/2; w < dst_width/2 + SquareSize*3/2; w+=3) { // only scans the middle SquareSize pixels of a line 
        *(dstp + w) = 255;    // Set Blue to maximum value.
        *(dstp + w + 1) = 255;     // and Green.
        *(dstp + w + 2) = 255;    // and Red - therefore the whole pixel is now white.
      }                              
      dstp = dstp + dst_pitch; 
    }
  }

  if (vi.IsRGB32()) {
    // This code deals with RGB32 colourspace where each pixel is represented by
    // 4 bytes, Blue, Green and Red and "spare" byte that could/should be used for alpha
    // keying but usually isn't.

    // Although this colourspace isn't memory efficient, code end ups running much
    // quicker than RGB24 as you can deal with whole 32bit variables at a time
    // and easily work directly and quickly in assembler (if you know how to that is :-)
    
    env->BitBlt(dst->GetWritePtr(), dst->GetPitch(), src->GetReadPtr(), src->GetPitch(), src->GetRowSize(), src->GetHeight());
    // end copy src to dst
    
    //Now draw a white square in the middle of the frame
    // Normally you'd do this code within the loop above but here it is in a separate loop for clarity;
    
    dstp = dst->GetWritePtr();  // reset the destination pointer to the bottom, left pixel. (RGB colourspaces only)
    dstp = dstp + (dst_height/2 - SquareSize/2)*dst_pitch;  // move pointer to SquareSize/2 lines from the middle of the frame;

    int woffset = dst_width/8 - SquareSize/2;  // lets precalculate the width offset like we do for the lines.

    for (h=0; h < SquareSize;h++) { // only scan SquareSize number of lines 
      for (w = 0; w < SquareSize; w+=1) { // only scans the middle SquareSize pixels of a line 
        *((unsigned int *)dstp + woffset + w) = 0x00FFFFFF;    // Set Red,Green and Blue to maximum value in 1 instruction.
                                              // LSB = Blue, MSB = "spare" byte
      }  
      dstp = dstp + dst_pitch; 
    }
  }

  if (vi.IsYUY2()) {
    // This code deals with YUY2 colourspace where each 4 byte sequence represents
    // 2 pixels, (Y1, U, Y2 and then V).

    // This colourspace is more memory efficient than RGB32 but can be more awkward to use sometimes.
    // However, it can still be manipulated 32bits at a time depending on the
    // type of filter you are writing

    // There is no difference in code for this loop and the RGB32 code due to a coincidence :-)
    // 1) YUY2 frame_width is half of an RGB32 one
    // 2) But in YUY2 colourspace, a 32bit variable holds 2 pixels instead of the 1 in RGB32 colourspace.

    env->BitBlt(dst->GetWritePtr(), dst_pitch, src->GetReadPtr(), src_pitch, src_width, src_height);
    // end copy src to dst
    
    //Now draw the other clip inside a square in the middle of the frame
    // Normally you'd do this code within the loop above but here it is in a separate loop for clarity;
  
    dstp = dst->GetWritePtr();  // reset the destination pointer to the top, left pixel. (YUY2 colourspace only)
    dstp = dstp + (dst_height/2 - SquareSize/2)*dst_pitch + dst_width/2 - SquareSize;  // move pointer to SquareSize/2 lines from the middle of the frame;

    windowp = window->GetReadPtr();

    env->BitBlt(dstp, dst_pitch, windowp, window_pitch, SquareSize*2, SquareSize);
  }

  if (vi.IsYV12()) {

    // This code deals with YV12 colourspace where the Y, U and V information are
    // stored in completely separate memory areas

    // This colourspace is the most memory efficient but usually requires 3 separate loops
    // However, it can actually be easier to deal with than YUY2 depending on your filter algorithim
    
    // So first of all deal with the Y Plane

    for (h=0; h < src_height;h++) {       // Loop from top line to bottom line (same as YUY2).
      for (w = 0; w < src_width; w++)       // Loop from left side of the image to the right side.
        *(dstp + w) = *(srcp + w);          // Copy each byte from source to destination.
      srcp = srcp + src_pitch;            // Add the pitch (note use of pitch and not width) of one line (in bytes) to the source image.
      dstp = dstp + dst_pitch;            // Add the pitch of one line (in bytes) to the destination.
    }
    // end copy Y Plane src to dst

    //Now set the Y plane bytes to maximum in the middle of the frame
    // Normally you'd do this code within the loop above but here it is in a separate loop for clarity;
    
    dstp = dst->GetWritePtr();  // reset the destination pointer to the top, left pixel.
    dstp = dstp + (dst_height/2 - SquareSize/2)*dst_pitch;  // move pointer to SquareSize/2 lines from the middle of the frame;

    int woffset = dst_width/2 - SquareSize/2;  // lets precalculate the width offset like we do for the lines.

    for (h=0; h < SquareSize;h++) { // only scan SquareSize number of lines 
      for (w = 0; w < SquareSize; w+=1) { // only scans the middle SquareSize pixels of a line
        *(dstp + woffset + w) = 235;    // Set Y values to maximum
      }  
      dstp = dstp + dst_pitch; 
    }
    // end of Y plane Code

    // This section of code deals with the U and V planes of planar formats (e.g. YV12)
    // So first of all we have to get the additional info on the U and V planes

    const int dst_pitchUV = dst->GetPitch(PLANAR_U);  // The pitch,height and width information
    const int dst_widthUV = dst->GetRowSize(PLANAR_U);  // is guaranteed to be the same for both
    const int dst_heightUV = dst->GetHeight(PLANAR_U);  // the U and V planes so we only get the U
    const int src_pitchUV = src->GetPitch(PLANAR_U);  // plane values and use them for V as
    const int src_widthUV = src->GetRowSize(PLANAR_U);  // well
    const int src_heightUV = src->GetHeight(PLANAR_U);  //
    
    //Copy U plane src to dst
    srcp = src->GetReadPtr(PLANAR_U);
    dstp = dst->GetWritePtr(PLANAR_U);
    
    for (h=0; h < src_heightUV;h++) {
      for (w = 0; w < src_widthUV; w++)
        *(dstp + w) = *(srcp + w);
      srcp = srcp + src_pitchUV;
      dstp = dstp + dst_pitchUV;
    }
    // end copy U plane src to dst

    //Now set the U plane bytes to no colour in the middle of the frame
    // Normally you'd do this code within the loop above but here it is in a separate loop for clarity;
    
    dstp = dst->GetWritePtr(PLANAR_U);  // reset the destination pointer to the top, left pixel.
    dstp = dstp + (dst_heightUV/2 - SquareSize/4)*dst_pitchUV;  // note change in how much we divide SquareSize by
                                          // as the U plane height is half the Y plane

    woffset = dst_widthUV/2 - SquareSize/4;  // And the divisor changes here as well compared to Y plane code.

    for (h=0; h < SquareSize/2;h++) { // only scan SquareSize/2 number of lines (because the U plane height is half the Y)
      for (w = 0; w < SquareSize/2; w+=1) { // only scans the middle SquareSize/2 bytes of a line because ... U=Y/2 :-)
        *(dstp + woffset + w) = 128;    // Set U Value to no colour
      }  
      dstp = dstp + dst_pitchUV; 
    }
    // end of U plane Code


    
    //Copy V plane src to dst
    srcp = src->GetReadPtr(PLANAR_V);
    dstp = dst->GetWritePtr(PLANAR_V);
    
    for (h=0; h < src_heightUV;h++) {
      for (w = 0; w < src_widthUV; w++)
        *(dstp + w) = *(srcp + w);
      srcp = srcp + src_pitchUV;
      dstp = dstp + dst_pitchUV;
    }
    // end copy V plane src to dst

    //Now set the V plane bytes to no colour in the middle of the frame
    // the code is identical to the code for U plane apart from getting the frame start pointer.
    // Normally you'd do this code within the loop above but here it is in a separate loop for clarity;
    
    dstp = dst->GetWritePtr(PLANAR_V);  // reset the destination pointer to the top, left pixel.
    dstp = dstp + (dst_heightUV/2 - SquareSize/4)*dst_pitchUV;  // note change in how much we divide SquareSize by
                                          // as the V plane height is half the Y plane

    woffset = dst_widthUV/2 - SquareSize/4;  // And the divisor changes here as well compared to Y plane code.

    for (h=0; h < SquareSize/2;h++) { // only scan SquareSize/2 number of lines (because the V plane height is half the Y)
      for (w = 0; w < SquareSize/2; w+=1) { // only scans the middle SquareSize/2 bytes of a line because ... V=Y/2 :-)
        *(dstp + woffset + w) = 128;    // Set V Value to no colour
      }  
      dstp = dstp + dst_pitchUV; 
    }
    // end of V plane Code

  }
  
  // As we now are finished processing the image, we return the destination image.
  return dst;
}
Example #24
CAsifClip::CAsifClip(PClip clip, IScriptEnvironment* env) {
    video = clip;
    clipse = env;
    vi = clip->GetVideoInfo();
}
Example #25
MVRecalculate::MVRecalculate(PClip _super, PClip _vectors, int _thSAD, int _smooth, int _blksizex, int _blksizey,
                     int st, int stp, int lambda, bool chroma,
                     int _pnew, int _overlapx, int _overlapy,
					 const char* _outfilename, int _dctmode, int _divide,
					 int _sadx264, bool _isse, IScriptEnvironment* env) :
GenericVideoFilter(_super),
mvClip(_vectors, 999999, 255, env)
{
    outfilename = _outfilename;
    smooth = _smooth;

   	// get parameters of super clip - v2.0
	SuperParams64Bits params;
    memcpy(&params, &child->GetVideoInfo().num_audio_samples, 8);
    int nHeight = params.nHeight;
    int nSuperHPad = params.nHPad;
    int nSuperVPad = params.nVPad;
    int nSuperPel = params.nPel;
    int nSuperModeYUV = params.nModeYUV;
    int nSuperLevels = params.nLevels;

    nModeYUV = chroma ? YUVPLANES : YPLANE;
    if ((nModeYUV & nSuperModeYUV) != nModeYUV)
			env->ThrowError("MRecalculate: super clip does not contain needed color data");

    MVAnalysisData *pAnalyseFilter = reinterpret_cast<MVAnalysisData *>(_vectors->GetVideoInfo().nchannels);

	analysisData.nWidth = pAnalyseFilter->GetWidth();
	analysisData.nHeight = pAnalyseFilter->GetHeight();
	analysisData.pixelType = pAnalyseFilter->GetPixelType();
	analysisData.yRatioUV = (vi.IsYV12()) ? 2 : 1;
	analysisData.xRatioUV = 2; // for YV12 and YUY2; not really used, assumed to be 2

    pSrcGOF = new MVGroupOfFrames(nSuperLevels, analysisData.nWidth, analysisData.nHeight, nSuperPel, nSuperHPad, nSuperVPad, nSuperModeYUV, _isse, analysisData.yRatioUV);
    pRefGOF = new MVGroupOfFrames(nSuperLevels, analysisData.nWidth, analysisData.nHeight, nSuperPel, nSuperHPad, nSuperVPad, nSuperModeYUV, _isse, analysisData.yRatioUV);
    int nSuperWidth = child->GetVideoInfo().width;
    int nSuperHeight = child->GetVideoInfo().height;

//	if (video->GetVideoInfo().width != analysisData.nWidth ||
//	    video->GetVideoInfo().height != analysisData.nHeight ||
//	    video->GetVideoInfo().pixel_type != analysisData.pixelType )
//	        env->ThrowError("MVRecalculate: video must have same properties !");

    if (nHeight != analysisData.nHeight || nSuperWidth - 2*nSuperHPad != analysisData.nWidth)
    		env->ThrowError("MRecalculate : wrong frame size");
	if (vi.pixel_type != analysisData.pixelType )
	        env->ThrowError("MRecalculate: wrong pixel type");

	analysisData.nBlkSizeX = _blksizex;
	analysisData.nBlkSizeY = _blksizey;
	if (( analysisData.nBlkSizeX != 4 || analysisData.nBlkSizeY != 4) &&
        ( analysisData.nBlkSizeX != 8 || analysisData.nBlkSizeY != 4) &&
        ( analysisData.nBlkSizeX != 8 || analysisData.nBlkSizeY != 8 ) &&
        ( analysisData.nBlkSizeX != 16 || analysisData.nBlkSizeY != 2 ) &&
        ( analysisData.nBlkSizeX != 16 || analysisData.nBlkSizeY != 8 ) &&
        ( analysisData.nBlkSizeX != 16 || analysisData.nBlkSizeY != 16 ) &&
        ( analysisData.nBlkSizeX != 32 || analysisData.nBlkSizeY != 16))
		env->ThrowError("MVRecalculate: Block's size must be 4x4, 8x4, 8x8, 16x2, 16x8, 16x16, 32x16");

//	if (!vi.IsYV12() && !vi.IsYUY2())
//		env->ThrowError("MVRecalculate: Clip must be YV12 or YUY2");

//   analysisData.nPel = pel;
//	if (( analysisData.nPel != 1 ) && ( analysisData.nPel != 2 ) && ( analysisData.nPel != 4 ))
//		env->ThrowError("MVRecalculate: pel has to be 1 or 2 or 4");
	analysisData.nPel =	pAnalyseFilter->GetPel();

//   analysisData.nDeltaFrame = df;
//   if ( analysisData.nDeltaFrame < 1 )
//      analysisData.nDeltaFrame = 1;
   analysisData.nDeltaFrame = pAnalyseFilter->GetDeltaFrame();

   if (_overlapx<0 || _overlapx >= _blksizex || _overlapy<0 || _overlapy >= _blksizey)
		env->ThrowError("MRecalculate: overlap must be less than block size");

//   if (_overlapx%2 || (_overlapy%2 >0 && vi.IsYV12()))
   if (_overlapx%2 || (_overlapy%2 >0 && vi.IsYV12()))
		env->ThrowError("MRecalculate: overlap must be more even");

	if (_divide != 0 && (_blksizex < 8 && _blksizey < 8) )
		env->ThrowError("MRecalculate: Block sizes must be 8 or more for divide mode");
   if (_divide != 0 && (_overlapx%4 || (_overlapy%4 >0 && vi.IsYV12() ) || (_overlapy%2 >0 && vi.IsYUY2() )  ))
		env->ThrowError("MRecalculate: overlap must be more even for divide mode");

    divideExtra = _divide;

    headerSize = max(4 + sizeof(analysisData), 256); // include itself, but usually equal to 256 :-)

	analysisData.nOverlapX = _overlapx;
	analysisData.nOverlapY = _overlapy;

	int nBlkX = (analysisData.nWidth - analysisData.nOverlapX) / (analysisData.nBlkSizeX - analysisData.nOverlapX);
//	if (analysisData.nWidth > (analysisData.nBlkSize - analysisData.nOverlap) * nBlkX )
//		nBlkX += 1;

	int nBlkY = (analysisData.nHeight - analysisData.nOverlapY) / (analysisData.nBlkSizeY - analysisData.nOverlapY);
//	if (analysisData.nHeight > (analysisData.nBlkSize - analysisData.nOverlap) * nBlkY )
//		nBlkY += 1;

	analysisData.nBlkX = nBlkX;
	analysisData.nBlkY = nBlkY;
//	int nWidth_B = nBlkX*(analysisData.nBlkSizeX - analysisData.nOverlapX) + analysisData.nOverlapX;
//	int nHeight_B = nBlkY*(analysisData.nBlkSizeY - analysisData.nOverlapY) + analysisData.nOverlapY;

//	analysisData.nLvCount = ilog2(	((nBlkX) > (nBlkY)) ? (nBlkY) : (nBlkX) ) - lv;
//	analysisData.nLvCount = ( analysisData.nLvCount < 1 ) ? 1 : analysisData.nLvCount;
	analysisData.nLvCount = 1;

//	analysisData.isBackward = isb;
	analysisData.isBackward = pAnalyseFilter->IsBackward();


   nLambda = lambda;
//   lsad = _lsad;
   pnew = _pnew;
//   plevel = _plevel;
//   global = _global;

   if (_dctmode == 0)
   {
		hinstFFTW3 = NULL;
		DCTc = 0;
   }
   else
   {
	   if (_isse && (_blksizex == 8) && _blksizey ==8 )
			DCTc = new DCTINT(_blksizex, _blksizey, _dctmode);
	   else
	   {
			hinstFFTW3 = LoadLibrary("fftw3.dll"); // delayed loading
			if (hinstFFTW3==NULL) env->ThrowError("MRecalculate: cannot load fftw3.dll!");
			DCTc = new DCTFFTW(_blksizex, _blksizey, hinstFFTW3, _dctmode); // check order x,y
	   }
	}

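	// st selects the motion search type; stp is its search parameter, clamped to a sane minimum.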
	switch ( st )
	{
	case 0 :
		searchType = ONETIME;
		nSearchParam = ( stp < 1 ) ? 1 : stp;
		break;
	case 1 :
		searchType = NSTEP;
		nSearchParam = ( stp < 0 ) ? 0 : stp;
		break;
	case 3 :
		searchType = EXHAUSTIVE;
		nSearchParam = ( stp < 1 ) ? 1 : stp;
		break;
	case 2 :
	default :
		searchType = LOGARITHMIC;
		nSearchParam = ( stp < 1 ) ? 1 : stp;
	}

//    nPelSearch = ( _pelSearch < analysisData.nPel) ? analysisData.nPel : _pelSearch; // not below value of pel at finest level

//	analysisData.pixelType = vi.pixel_type;

   analysisData.nFlags = 0;
   analysisData.nFlags |= _isse ? MOTION_USE_ISSE : 0;
   analysisData.nFlags |= analysisData.isBackward ? MOTION_IS_BACKWARD : 0;
   analysisData.nFlags |= chroma ? MOTION_USE_CHROMA_MOTION : 0;
/*
#define CPU_CACHELINE_32   0x00001000
#define CPU_CACHELINE_64   0x00002000
#define CPU_MMX            0x00004000
#define CPU_MMXEXT         0x00008000
#define CPU_SSE            0x00010000
#define CPU_SSE2           0x00020000
#define CPU_SSE2_IS_SLOW   0x00040000
#define CPU_SSE2_IS_FAST   0x00080000
#define CPU_SSE3           0x00100000
#define CPU_SSSE3          0x00200000
#define CPU_PHADD_IS_FAST  0x00400000
#define CPU_SSE4           0x00800000
//force MVRecalculate to use a different function for SAD / SADCHROMA (debug)
#define MOTION_USE_SSD     0x01000000
#define MOTION_USE_SATD    0x02000000
*/
	if (_sadx264 == 0)
	{
		analysisData.nFlags |= cpu_detect();
	}
	else
	{
		if ((_sadx264 > 0)&&(_sadx264 <= 12))
		{
			//force specific function
			analysisData.nFlags |= CPU_MMXEXT;
			analysisData.nFlags |= (_sadx264 == 2) ? CPU_CACHELINE_32 : 0;
			analysisData.nFlags |= ((_sadx264 == 3)||(_sadx264 == 5)||(_sadx264 == 7)) ? CPU_CACHELINE_64 : 0;
			analysisData.nFlags |= ((_sadx264 == 4)||(_sadx264 == 5)||(_sadx264 ==10)) ? CPU_SSE2_IS_FAST : 0;
			analysisData.nFlags |= (_sadx264 == 6) ? CPU_SSE3 : 0;
			analysisData.nFlags |= ((_sadx264 == 7)||(_sadx264 >=11)) ? CPU_SSSE3 : 0;
			//beta (debug)
			analysisData.nFlags |= (_sadx264 == 8) ? MOTION_USE_SSD : 0;
			analysisData.nFlags |= ((_sadx264 >= 9)&&(_sadx264 <= 12)) ? MOTION_USE_SATD : 0;
			analysisData.nFlags |= (_sadx264 ==12) ? CPU_PHADD_IS_FAST : 0;
		}
	}

//	analysisData.usePelClip = false;
//   if (pelclip && (analysisData.nPel >= 2))
//   {
//		if (pelclip->GetVideoInfo().width != video->GetVideoInfo().width*analysisData.nPel || pelclip->GetVideoInfo().height != video->GetVideoInfo().height*analysisData.nPel)
//			env->ThrowError("MVRecalculate: pelclip frame size must be Pel of source!");
//		else
//			analysisData.usePelClip = true;
//   }

	vectorFields = new GroupOfPlanes(analysisData.nWidth, analysisData.nHeight, analysisData.nBlkSizeX, analysisData.nBlkSizeY,
		analysisData.nLvCount, analysisData.nPel, analysisData.nFlags,
		analysisData.nOverlapX, analysisData.nOverlapY, analysisData.nBlkX, analysisData.nBlkY, analysisData.yRatioUV, divideExtra);

//   analysisData.nIdx = _idx;

   analysisData.nMagicKey = MOTION_MAGIC_KEY;

//   analysisData.pmvCore = &mvCore;
//   analysisData.pmvCore = pAnalyseFilter->GetMVCore();//&mvCore;
//   mvCore = analysisData.pmvCore;

   analysisData.nHPadding = nSuperHPad;
   analysisData.nVPadding = nSuperVPad;

//   mvCore->AddFrames(analysisData.nIdx, analysisData.nDeltaFrame*2+1, analysisData.nLvCount,
//	   analysisData.nWidth, analysisData.nHeight, analysisData.nPel, analysisData.nHPadding,
//	   analysisData.nVPadding, YUVPLANES, _isse, analysisData.yRatioUV);


	analysisData.nVersion = MVANALYSIS_DATA_VERSION; // MVAnalysisData and outfile format version: last update v1.8.1

	if (lstrlen(outfilename) > 0) {
		outfile = fopen(outfilename,"wb");
		if (outfile == NULL)
			env->ThrowError("MRecalculate: out file can not be created!");
		else
		{
			fwrite( &analysisData, sizeof(analysisData), 1, outfile );
			outfilebuf = new short[nBlkX*nBlkY*4]; // short vx, short vy, int SAD = 4 words = 8 bytes per block
		}
	 }
	 else {
		 outfile = NULL;
		 outfilebuf = NULL;
	 }

//	analysisData.sharp = _sharp; // pel2 interpolation type

    // vector stream packed into a fake video frame
	vi.height = 1;
	vi.width = headerSize/sizeof(int) + vectorFields->GetArraySize(); //v1.8.1
	vi.pixel_type = VideoInfo::CS_BGR32;
    vi.audio_samples_per_second = 0; //v1.8.1

	if (divideExtra) { //v1.8.1
        memcpy(&analysisDataDivided, &analysisData, sizeof(analysisData));
        analysisDataDivided.nBlkX = analysisData.nBlkX * 2;
        analysisDataDivided.nBlkY = analysisData.nBlkY * 2;
        analysisDataDivided.nBlkSizeX = analysisData.nBlkSizeX / 2;
        analysisDataDivided.nBlkSizeY = analysisData.nBlkSizeY / 2;
        analysisDataDivided.nOverlapX = analysisData.nOverlapX / 2;
        analysisDataDivided.nOverlapY = analysisData.nOverlapY / 2;
        analysisDataDivided.nLvCount = analysisData.nLvCount + 1;
        vi.nchannels = reinterpret_cast<int>(&analysisDataDivided);
//        analysisDataDivided.pmvCore = mvCore;
	}
	else
	{

   // We pass the processing filters a handle to the analysis filter itself
   // (its own pointer) so that they can pick up the right parameters.
        vi.nchannels = reinterpret_cast<int>(&analysisData);
	}

   if ( chroma ) // normalize threshold to block size
      thSAD = _thSAD * (analysisData.nBlkSizeX * analysisData.nBlkSizeY) / (8 * 8) * (1 + analysisData.yRatioUV) / analysisData.yRatioUV;
   else
      thSAD = _thSAD * (analysisData.nBlkSizeX * analysisData.nBlkSizeY) / (8 * 8);


}
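A minimal sketch (not part of the original sources) of the other end of this handshake: the constructor above publishes a pointer to its MVAnalysisData through VideoInfo::nchannels, and a downstream MVTools filter recovers it with the same reinterpret_cast seen at the top of this example. The helper name is illustrative.

// Hypothetical helper: recover and validate the MVAnalysisData handle
// that MRecalculate stores in VideoInfo::nchannels of its output clip.
static MVAnalysisData *GetVectorsAnalysisData(PClip vectors, IScriptEnvironment *env)
{
	MVAnalysisData *pData =
		reinterpret_cast<MVAnalysisData *>(vectors->GetVideoInfo().nchannels);
	if (pData == NULL)
		env->ThrowError("MVTools: not a motion vectors clip");
	if (pData->nMagicKey != MOTION_MAGIC_KEY)
		env->ThrowError("MVTools: not a motion vectors clip (wrong magic key)");
	if (pData->nVersion != MVANALYSIS_DATA_VERSION)
		env->ThrowError("MVTools: wrong MVAnalysisData version");
	return pData;
}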
Example #26
AVSValue __cdecl Create_flash3kyuu_deband(AVSValue args, void* user_data, IScriptEnvironment* env){
    PClip child = ARG(child).AsClip();
    const VideoInfo& vi = child->GetVideoInfo();
    check_video_format("f3kdb", vi, env);

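    // Default to multi-threaded processing when the machine has more than one logical processor.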
    SYSTEM_INFO si;
    memset(&si, 0, sizeof(si));
    GetSystemInfo(&si);
    bool mt = ARG(mt).AsBool(si.dwNumberOfProcessors > 1);

    f3kdb_params_t params;
    f3kdb_params_init_defaults(&params);

    if (F3KDB_ARG(preset).Defined()) {
        int result = f3kdb_params_fill_preset(&params, F3KDB_ARG(preset).AsString());
        if (result != F3KDB_SUCCESS) {
            env->ThrowError("f3kdb: Invalid preset (code: %d)", result);
        }
    }

    f3kdb_params_from_avs(args, &params);
    f3kdb_params_sanitize(&params);

    f3kdb_video_info_t video_info;
    video_info.num_frames = vi.num_frames;
    video_info.pixel_mode = (PIXEL_MODE)ARG(input_mode).AsInt(DEFAULT_PIXEL_MODE);
    video_info.depth = ARG(input_depth).AsInt(-1);
    video_info.chroma_width_subsampling  = vi.IsY8() ? 0 : vi.GetPlaneWidthSubsampling(PLANAR_U);
    video_info.chroma_height_subsampling = vi.IsY8() ? 0 : vi.GetPlaneHeightSubsampling(PLANAR_U);
    f3kdb_video_info_sanitize(&video_info);

    video_info.width = vi.width;
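    // In interleaved high-bit-depth mode the LSB/MSB byte pairs sit side by side,
    // so the stored frame is twice the real width; after halving, the width must
    // still respect the chroma subsampling factor.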
    if (video_info.pixel_mode == HIGH_BIT_DEPTH_INTERLEAVED)
    {
        int width_mod = 2 << video_info.chroma_width_subsampling;
        if (video_info.width % width_mod != 0)
        {
            env->ThrowError("f3kdb: The clip does not appear to be an interleaved high bit depth clip. (width MOD)");
        }
        video_info.width /= 2;
    }

    video_info.height = vi.height;
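    // In stacked high-bit-depth mode the MSB image sits above the LSB image,
    // so the stored frame is twice the real height.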
    if (video_info.pixel_mode == HIGH_BIT_DEPTH_STACKED)
    {
        int height_mod = 2 << video_info.chroma_height_subsampling;
        if (video_info.height % height_mod != 0)
        {
            env->ThrowError("f3kdb: The clip does not appear to be an stacked high bit depth clip. (height MOD)");
        }
        video_info.height /= 2;
    }

    f3kdb_core_t* core = NULL;
    char error_msg[1024];
    memset(error_msg, 0, sizeof(error_msg));
    int result = f3kdb_create(&video_info, &params, &core, error_msg, sizeof(error_msg) - 1);
    if (result != F3KDB_SUCCESS)
    {
        env->ThrowError("f3kdb: Initialization failed (code: %d). %s", result, error_msg);
    }

    int dst_width = video_info.width;
    if (params.output_mode == HIGH_BIT_DEPTH_INTERLEAVED)
    {
        dst_width *= 2;
    }

    int dst_height = video_info.height;
    if (params.output_mode == HIGH_BIT_DEPTH_STACKED)
    {
        dst_height *= 2;
    }
    
    return new f3kdb_avisynth(child, core, dst_width, dst_height, mt);
}
Example #27
int AvxContext::OutputVideo() {
	FILE *sink;
	unsigned char *writeBuffer = NULL;
	sighandler_t old_sigpipe = signal(SIGPIPE, SIG_IGN);

	if (launchMPlayer) {
		char command[1024];
		if (MPlayerCommandVideo(command))
			return -1;
		AVXLOG_INFO("MPlayer command line: %s", command);

		sink = popen(command, "w");
		if (!sink) {
			AVXLOG_ERROR("%s", "Error starting mplayer");
			return -1;
		}
	} else {
		sink = stdout;
	}

	writeBuffer = (unsigned char *)malloc(vi.RowSize() * vi.height);
	if (!writeBuffer) {
		AVXLOG_ERROR("%s", "Unable to allocate memory");
		goto fail;
	}

	try {
		for (int i = 0; i < vi.num_frames; ++i) {
			if (launchMPlayer && (feof(sink) || ferror(sink))) {
				AVXLOG_ERROR("%s", "mplayer process exited");
				break;
			}
			PVideoFrame frame = clip->GetFrame(i, avx_library.env);
			if (vi.IsPlanar()) { // Check plane count in 2.6.
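				// Plane order Y, V, U matches the raw YV12 memory layout.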
				int planes[] = {PLANAR_Y, PLANAR_V, PLANAR_U};
				for (int j = 0; j < 3; ++j) {
					int plane = planes[j];
					int src_pitch = frame->GetPitch(plane);
					int row_size = frame->GetRowSize(plane);
					int height = frame->GetHeight(plane);
					const unsigned char *srcp = frame->GetReadPtr(plane);

					avx_library.env->BitBlt(writeBuffer, row_size, srcp, src_pitch, row_size, height);
					fwrite(writeBuffer, 1, row_size * height, sink);
				}
			} else {
				int src_pitch = frame->GetPitch();
				int row_size = frame->GetRowSize();
				int height = frame->GetHeight();
				const unsigned char *srcp = frame->GetReadPtr();

				avx_library.env->BitBlt(writeBuffer, row_size, srcp, src_pitch, row_size, height);
				fwrite(writeBuffer, 1, row_size * height, sink);
			}
		}
	} catch (AvisynthError &e) {
		AVXLOG_ERROR("AvisynthError: %s", e.msg);
		goto fail;
	}

	free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return 0;

fail:
	if (writeBuffer)
		free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return -1;
}
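OutputVideo pipes raw frames to mplayer via popen, but MPlayerCommandVideo is defined elsewhere. A hypothetical sketch of what it might build for a YV12 clip, assuming mplayer's rawvideo demuxer suboptions (w, h, format, fps) and stdin input; the exact flags and format name are assumptions, not taken from the source.

// Hypothetical sketch only: build an mplayer command line that reads raw
// YV12 frames from stdin. Option names are assumptions.
int AvxContext::MPlayerCommandVideo(char *command) {
	double fps = (double)vi.fps_numerator / vi.fps_denominator;
	snprintf(command, 1024,
	         "mplayer - -demuxer rawvideo -rawvideo w=%d:h=%d:format=yv12:fps=%.3f",
	         vi.width, vi.height, fps);
	return 0; // 0 = success, matching the caller's error check
}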
Example #28
MVFlowBlur::MVFlowBlur(PClip _child, PClip super, PClip _mvbw, PClip _mvfw,  int _blur256, int _prec,
                           int nSCD1, int nSCD2, bool _isse, bool _planar, IScriptEnvironment* env) :
GenericVideoFilter(_child),
MVFilter(_mvfw, "MFlowBlur", env, 1, 0),
mvClipB(_mvbw, nSCD1, nSCD2, env, 1, 0),
mvClipF(_mvfw, nSCD1, nSCD2, env, 1, 0)
{
   blur256 = _blur256;
   prec = _prec;
   isse = _isse;
   planar = _planar;

   CheckSimilarity(mvClipB, "mvbw", env);
   CheckSimilarity(mvClipF, "mvfw", env);
	SuperParams64Bits params;
	memcpy(&params, &super->GetVideoInfo().num_audio_samples, 8);
	int nHeightS = params.nHeight;
	int nSuperHPad = params.nHPad;
	int nSuperVPad = params.nVPad;
	int nSuperPel = params.nPel;
	int nSuperModeYUV = params.nModeYUV;
	int nSuperLevels = params.nLevels;
	int nSuperWidth = super->GetVideoInfo().width; // dimensions of the super clip itself
	int nSuperHeight = super->GetVideoInfo().height;

	if (   nHeight != nHeightS
	    || nWidth  != nSuperWidth - nSuperHPad * 2
	    || nPel    != nSuperPel)
	{
		env->ThrowError("MFlowBlur : wrong super frame clip");
	}

	if (nPel==1)
		finest = super; // v2.0.9.1
	else
	{
		finest = new MVFinest(super, isse, env);
		AVSValue cache_args[1] = { finest };
		finest = env->Invoke("InternalCache", AVSValue(cache_args,1)).AsClip(); // add cache for speed
	}

//	if (   nWidth  != vi.width  || (nWidth  + nHPadding*2)*nPel != finest->GetVideoInfo().width
//	    || nHeight != vi.height || (nHeight + nVPadding*2)*nPel != finest->GetVideoInfo().height)
//		env->ThrowError("MVFlowBlur: wrong source of finest frame size");


	nHeightUV = nHeight/yRatioUV;
	nWidthUV = nWidth/2; // for YV12
	nHPaddingUV = nHPadding/2;
	nVPaddingUV = nVPadding/yRatioUV;

	VPitchY = nWidth;
	VPitchUV= nWidthUV;

	VXFullYB = new BYTE [nHeight*VPitchY];
	VXFullUVB = new BYTE [nHeightUV*VPitchUV];
	VYFullYB = new BYTE [nHeight*VPitchY];
	VYFullUVB = new BYTE [nHeightUV*VPitchUV];

	VXFullYF = new BYTE [nHeight*VPitchY];
	VXFullUVF = new BYTE [nHeightUV*VPitchUV];
	VYFullYF = new BYTE [nHeight*VPitchY];
	VYFullUVF = new BYTE [nHeightUV*VPitchUV];

	VXSmallYB = new BYTE [nBlkX*nBlkY];
	VYSmallYB = new BYTE [nBlkX*nBlkY];
	VXSmallUVB = new BYTE [nBlkX*nBlkY];
	VYSmallUVB = new BYTE [nBlkX*nBlkY];

	VXSmallYF = new BYTE [nBlkX*nBlkY];
	VYSmallYF = new BYTE [nBlkX*nBlkY];
	VXSmallUVF = new BYTE [nBlkX*nBlkY];
	VYSmallUVF = new BYTE [nBlkX*nBlkY];

	MaskSmallB = new BYTE [nBlkX*nBlkY];
	MaskFullYB = new BYTE [nHeight*VPitchY];
	MaskFullUVB = new BYTE [nHeightUV*VPitchUV];

	MaskSmallF = new BYTE [nBlkX*nBlkY];
	MaskFullYF = new BYTE [nHeight*VPitchY];
	MaskFullUVF = new BYTE [nHeightUV*VPitchUV];

	int CPUF_Resize = env->GetCPUFlags();
	if (!isse) CPUF_Resize &= ~(CPUF_INTEGER_SSE | CPUF_SSE2); // mask out the SSE capability flags when isse is disabled

	upsizer = new SimpleResize(nWidth, nHeight, nBlkX, nBlkY, CPUF_Resize);
	upsizerUV = new SimpleResize(nWidthUV, nHeightUV, nBlkX, nBlkY, CPUF_Resize);

	if ( (pixelType & VideoInfo::CS_YUY2) == VideoInfo::CS_YUY2 && !planar)
	{
		DstPlanes =  new YUY2Planes(nWidth, nHeight);
	}
}
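The constructor allocates its work buffers with plain new BYTE[], so a matching teardown is required. A minimal sketch of what the destructor presumably looks like (assumed, not taken from the original source); the member names match the allocations above.

// Hypothetical destructor sketch: release every buffer allocated with
// new BYTE[] via delete[], and the helper objects via delete.
MVFlowBlur::~MVFlowBlur()
{
	if ( (pixelType & VideoInfo::CS_YUY2) == VideoInfo::CS_YUY2 && !planar)
		delete DstPlanes; // only allocated for non-planar YUY2 input

	delete upsizer;
	delete upsizerUV;

	delete [] VXFullYB;   delete [] VXFullUVB;
	delete [] VYFullYB;   delete [] VYFullUVB;
	delete [] VXFullYF;   delete [] VXFullUVF;
	delete [] VYFullYF;   delete [] VYFullUVF;

	delete [] VXSmallYB;  delete [] VYSmallYB;
	delete [] VXSmallUVB; delete [] VYSmallUVB;
	delete [] VXSmallYF;  delete [] VYSmallYF;
	delete [] VXSmallUVF; delete [] VYSmallUVF;

	delete [] MaskSmallB; delete [] MaskFullYB; delete [] MaskFullUVB;
	delete [] MaskSmallF; delete [] MaskFullYF; delete [] MaskFullUVF;
}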
Example #29
int main(int argc, TCHAR* argv[]) {
	SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED);

	printf("Usage: filmtester <avs filename> [duplicates_maxlength=2]\n");
	printf("The program plays the AVS file and tests for frame duplicates\n\n");

	int duplicates_maxlength = 2;
	if (argc < 2) {
		printf("No filename specified.\n\n");
		return -1;
	}
	if (argc > 2) {
		duplicates_maxlength = _ttoi(argv[2]);
		printf("INFO: duplicates_maxlength set to %d\n", duplicates_maxlength);
	}

	IScriptEnvironment *env = CreateScriptEnvironment();
	_tprintf(_T("Loading \"%s\" ...\n"), argv[1]);

	LPCSTR arg_names[1] = { nullptr };
	AVSValue arg_vals[1] = { (LPCSTR)argv[1] };
	clip = env->Invoke("import", AVSValue(arg_vals,1), arg_names).AsClip();

	printf("AVS file loaded successfully.\n\n");

	VideoInfo vi = clip->GetVideoInfo();
	printf("VideoInfo:\n");
	printf("-----------\n");
	if (vi.HasVideo()) {
		printf("width x height: %dx%d\n", vi.width, vi.height);
		printf("num_frames: %d\n", vi.num_frames);
		printf("fps: %d/%d\n", vi.fps_numerator, vi.fps_denominator);

		std::string colorspace;
		if (vi.pixel_type & VideoInfo::CS_BGR) colorspace += "BGR, ";
		if (vi.pixel_type & VideoInfo::CS_YUV) colorspace += "YUV, ";
		if (vi.pixel_type & VideoInfo::CS_INTERLEAVED) colorspace += "INTERLEAVED, ";
		if (vi.pixel_type & VideoInfo::CS_PLANAR) colorspace += "PLANAR, ";
		if (colorspace.length() > 0) colorspace.erase(colorspace.length()-2);
		printf("colorspace: %s\n", colorspace.c_str());

		std::string colorformat;
		if (vi.pixel_type & VideoInfo::CS_BGR24) colorformat += "BGR24, ";
		if (vi.pixel_type & VideoInfo::CS_BGR32) colorformat += "BGR32, ";
		if (vi.pixel_type & VideoInfo::CS_YUY2)  colorformat += "YUY2, ";
		if (vi.pixel_type & VideoInfo::CS_YV12)  colorformat += "YV12, ";
		if (vi.pixel_type & VideoInfo::CS_I420)  colorformat += "I420 (IYUV), ";
		if (colorformat.length() > 0)
			colorformat.erase(colorformat.length()-2);
		else
			colorformat = "UNKNOWN";
		printf("colorformat: %s\n", colorformat.c_str());

		std::string imagetype;
		if (vi.image_type & VideoInfo::IT_BFF) imagetype += "BFF, ";
		if (vi.image_type & VideoInfo::IT_TFF) imagetype += "TFF, ";
		if (vi.image_type & VideoInfo::IT_FIELDBASED)  imagetype += "FIELDBASED, ";
		if (imagetype.length() > 0)
			imagetype.erase(imagetype.length()-2);
		else
			imagetype = "UNKNOWN";
		printf("image_type: %s\n", imagetype.c_str());
		printf("bits per pixel: %d\n", vi.BitsPerPixel());
	}
	else
		printf("NO VIDEO\n");

	if (vi.HasAudio()) {
		printf("audio channels: %d\n", vi.nchannels);
		printf("sample_type: %x\n", vi.sample_type);
		printf("samples per second: %d\n", vi.audio_samples_per_second);
		printf("bytes per channel sample: %d\n", vi.BytesPerChannelSample());
		printf("bytes per audio sample: %d\n", vi.BytesPerAudioSample());
		printf("num_audio_samples: %lld\n", vi.num_audio_samples);
	}
	else
		printf("NO AUDIO\n");
	printf("-----------\n\n");

	if (!vi.HasVideo()) {
		printf("Can't start video playback for the sequence without video.\n\n");
		return -1;
	}

	printf("Starting playback ...\n");
	prev_frame = clip->GetFrame(0, env);
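	// Note: GetDataSize() spans the whole frame buffer, padding included; the
	// duplicate test below assumes every frame shares frame 0's buffer layout.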
	int framesize = prev_frame->GetFrameBuffer()->GetDataSize();
	printf("INFO: framesize = %d bytes.\n\n", framesize);

	InitializeCriticalSection(&cs);
	SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE);

	int error_count = 0;
	int dup_start_frame = 0;
	bool flag_dup = false;
	std::vector<std::pair<int, int>> duplicates;
	for(int i=1; i<vi.num_frames; ++i) {
		EnterCriticalSection(&cs);
		dst = clip->GetFrame(i, env);
		const BYTE *src_ptr = prev_frame->GetFrameBuffer()->GetReadPtr();
		const BYTE *dst_ptr = dst->GetFrameBuffer()->GetReadPtr();
		if (!memcmp(src_ptr, dst_ptr, framesize)) {
			if (!flag_dup) {
				flag_dup = true;
				dup_start_frame = i-1;
			}
		}
		else if (flag_dup) {
			int length = (i-1) - dup_start_frame;
			if (length >= duplicates_maxlength) {
				printf("\rfilmtester: duplication interval: %d..%d" SPACES "\n", dup_start_frame, i-1);
				duplicates.push_back(std::make_pair(dup_start_frame, i-1));
				error_count++;
			}
			flag_dup = false;
		}
		prev_frame = dst;
		LeaveCriticalSection(&cs);
		printf("\r[%5.1f%%] [%d errors] %d/%d frame processing", (float)((i+1)*100)/vi.num_frames, error_count, i+1, vi.num_frames);
	}

	EnterCriticalSection(&cs);
	if (flag_dup) {
		int i = vi.num_frames;
		int length = (i-1) - dup_start_frame;
		if (length >= duplicates_maxlength) {
			printf("\rfilmtester: duplication interval: %d..%d" SPACES "\n", dup_start_frame, i-1);
			duplicates.push_back(std::make_pair(dup_start_frame, i-1));
			error_count++;
		}
		flag_dup = false;
	}
	printf("\rProcessing completed." SPACES "\n\n");
	printf("%d errors\n", error_count);
	printf("\n");
	if (error_count > 0) {
		printf("Erroneous intervals (%d):\n", duplicates.size());
		for(auto it = duplicates.begin(); it != duplicates.end(); ++it)
			printf("%5d .. %d\n", it->first, it->second);
		printf("\n");
	}
	dst = nullptr;
	prev_frame = nullptr;
	clip = nullptr;

	LeaveCriticalSection(&cs);
	DeleteCriticalSection(&cs);
	return error_count;
}