Code Example #1
File: perf_pyramids.cpp  Project: AliMiraftab/opencv
PERF_TEST_P(Size_MatType, buildPyramid, testing::Combine(
                testing::Values(sz1080p, sz720p, szVGA, szQVGA, szODD),
                testing::Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC3, CV_32FC4)
                )
            )
{
    Size sz = get<0>(GetParam());
    int matType = get<1>(GetParam());
    int maxLevel = 5;
    const double eps = CV_MAT_DEPTH(matType) <= CV_32S ? 1 : 1e-5;
    perf::ERROR_TYPE error_type = CV_MAT_DEPTH(matType) <= CV_32S ? ERROR_ABSOLUTE : ERROR_RELATIVE;
    Mat src(sz, matType);
    std::vector<Mat> dst(maxLevel);

    declare.in(src, WARMUP_RNG);

    TEST_CYCLE() buildPyramid(src, dst, maxLevel);

    Mat dst0 = dst[0], dst1 = dst[1], dst2 = dst[2], dst3 = dst[3], dst4 = dst[4];

    SANITY_CHECK(dst0, eps, error_type);
    SANITY_CHECK(dst1, eps, error_type);
    SANITY_CHECK(dst2, eps, error_type);
    SANITY_CHECK(dst3, eps, error_type);
    SANITY_CHECK(dst4, eps, error_type);
}
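This perf test just sweeps the plain cv::buildPyramid call over a grid of sizes and element types. For reference, a minimal self-contained sketch of the same call (an assumption: an OpenCV version that still ships cv::buildPyramid from <opencv2/imgproc.hpp>); level 0 of the output is the source image itself and each further level is a pyrDown of the previous one, so maxLevel = 5 yields six images:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cstdio>
#include <vector>

int main()
{
    cv::Mat src(480, 640, CV_8UC3, cv::Scalar::all(128)); // dummy input image
    std::vector<cv::Mat> pyr;
    const int maxLevel = 5;

    // pyr ends up holding maxLevel + 1 images: pyr[0] is src, pyr[k] is
    // pyrDown applied k times.
    cv::buildPyramid(src, pyr, maxLevel);

    for (int i = 0; i < (int)pyr.size(); ++i)
        std::printf("level %d: %d x %d\n", i, pyr[i].cols, pyr[i].rows);
    return 0;
}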
Code Example #2
void kanadeNextFrame(unsigned char* pixels, unsigned width, unsigned height)
{
	Timer t;

	// Advance the tracker to the new frame, then rebuild the image pyramid;
	// the elapsed time is accumulated in pyrTime.
	_current::kanadeNextFrame(pixels, width, height);
	buildPyramid(width, height);

	pyrTime += t.stop();
}
Code Example #3
File: mario.c  Project: sssmiiileee/cs50-pset1
int main(void)
{
	int height;	

	do {
		printf("height:");
		scanf("%i", &height);
	} while (height < 0 || height > 23);

	buildPyramid(height);
	
	return 0;
}
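The buildPyramid(height) helper itself is not shown in this excerpt. A purely hypothetical sketch of what such a helper could look like, assuming the usual CS50 "mario" spec (a right-aligned half-pyramid of '#' characters, one row per level):

#include <stdio.h>

// Hypothetical body for the buildPyramid() call above; the project's real
// implementation is not part of the excerpt.
void buildPyramid(int height)
{
    for (int row = 1; row <= height; row++)
    {
        for (int spaces = height - row; spaces > 0; spaces--)
            putchar(' ');
        for (int hashes = 0; hashes < row; hashes++)
            putchar('#');
        putchar('\n');
    }
}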
Code Example #4
File: merge.cpp  Project: 15751064254/opencv
    void process(InputArrayOfArrays src, OutputArray dst)
    {
        std::vector<Mat> images;
        src.getMatVector(images);
        checkImageDimensions(images);

        int channels = images[0].channels();
        CV_Assert(channels == 1 || channels == 3);
        Size size = images[0].size();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

        std::vector<Mat> weights(images.size());
        Mat weight_sum = Mat::zeros(size, CV_32F);

        for(size_t i = 0; i < images.size(); i++) {
            Mat img, gray, contrast, saturation, wellexp;
            std::vector<Mat> splitted(channels);

            images[i].convertTo(img, CV_32F, 1.0f/255.0f);
            if(channels == 3) {
                cvtColor(img, gray, COLOR_RGB2GRAY);
            } else {
                img.copyTo(gray);
            }
            split(img, splitted);

            Laplacian(gray, contrast, CV_32F);
            contrast = abs(contrast);

            Mat mean = Mat::zeros(size, CV_32F);
            for(int c = 0; c < channels; c++) {
                mean += splitted[c];
            }
            mean /= channels;

            saturation = Mat::zeros(size, CV_32F);
            for(int c = 0; c < channels;  c++) {
                Mat deviation = splitted[c] - mean;
                pow(deviation, 2.0f, deviation);
                saturation += deviation;
            }
            sqrt(saturation, saturation);

            wellexp = Mat::ones(size, CV_32F);
            for(int c = 0; c < channels; c++) {
                Mat expo = splitted[c] - 0.5f;
                pow(expo, 2.0f, expo);
                expo = -expo / 0.08f;
                exp(expo, expo);
                wellexp = wellexp.mul(expo);
            }

            pow(contrast, wcon, contrast);
            pow(saturation, wsat, saturation);
            pow(wellexp, wexp, wellexp);

            weights[i] = contrast;
            if(channels == 3) {
                weights[i] = weights[i].mul(saturation);
            }
            weights[i] = weights[i].mul(wellexp) + 1e-12f;
            weight_sum += weights[i];
        }
        int maxlevel = static_cast<int>(logf(static_cast<float>(min(size.width, size.height))) / logf(2.0f));
        std::vector<Mat> res_pyr(maxlevel + 1);

        for(size_t i = 0; i < images.size(); i++) {
            weights[i] /= weight_sum;
            Mat img;
            images[i].convertTo(img, CV_32F, 1.0f/255.0f);

            std::vector<Mat> img_pyr, weight_pyr;
            buildPyramid(img, img_pyr, maxlevel);
            buildPyramid(weights[i], weight_pyr, maxlevel);

            for(int lvl = 0; lvl < maxlevel; lvl++) {
                Mat up;
                pyrUp(img_pyr[lvl + 1], up, img_pyr[lvl].size());
                img_pyr[lvl] -= up;
            }
            for(int lvl = 0; lvl <= maxlevel; lvl++) {
                std::vector<Mat> splitted(channels);
                split(img_pyr[lvl], splitted);
                for(int c = 0; c < channels; c++) {
                    splitted[c] = splitted[c].mul(weight_pyr[lvl]);
                }
                merge(splitted, img_pyr[lvl]);
                if(res_pyr[lvl].empty()) {
                    res_pyr[lvl] = img_pyr[lvl];
                } else {
                    res_pyr[lvl] += img_pyr[lvl];
                }
            }
        }
        for(int lvl = maxlevel; lvl > 0; lvl--) {
            Mat up;
            pyrUp(res_pyr[lvl], up, res_pyr[lvl - 1].size());
            res_pyr[lvl - 1] += up;
        }
        dst.create(size, CV_32FCC);
        res_pyr[0].copyTo(dst.getMat());
    }
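This process() implements Mertens-style exposure fusion: per-pixel quality weights (contrast, saturation, well-exposedness) are computed for every input, and blending happens inside a Laplacian pyramid so that seams stay hidden at every scale; buildPyramid supplies both the Gaussian weight pyramids and the starting point for the Laplacian image pyramids. A condensed sketch of just that blend-and-collapse step, under simplifying assumptions (single-channel CV_32F inputs, weights already normalized per pixel, and blendPyramids is a hypothetical helper name):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

cv::Mat blendPyramids(const std::vector<cv::Mat>& images,
                      const std::vector<cv::Mat>& weights, int maxlevel)
{
    std::vector<cv::Mat> blended(maxlevel + 1);
    for (size_t i = 0; i < images.size(); i++) {
        std::vector<cv::Mat> img_pyr, w_pyr;
        cv::buildPyramid(images[i], img_pyr, maxlevel);  // Gaussian pyramid of the image
        cv::buildPyramid(weights[i], w_pyr, maxlevel);   // Gaussian pyramid of its weight map

        // Turn the image pyramid into a Laplacian pyramid: each level keeps
        // only the detail lost when going one level coarser.
        for (int lvl = 0; lvl < maxlevel; lvl++) {
            cv::Mat up;
            cv::pyrUp(img_pyr[lvl + 1], up, img_pyr[lvl].size());
            img_pyr[lvl] -= up;
        }
        // Accumulate the weighted levels across all inputs.
        for (int lvl = 0; lvl <= maxlevel; lvl++) {
            cv::Mat contrib = img_pyr[lvl].mul(w_pyr[lvl]);
            if (blended[lvl].empty())
                blended[lvl] = contrib;
            else
                blended[lvl] += contrib;
        }
    }
    // Collapse: start at the coarsest level and add the detail levels back in.
    for (int lvl = maxlevel; lvl > 0; lvl--) {
        cv::Mat up;
        cv::pyrUp(blended[lvl], up, blended[lvl - 1].size());
        blended[lvl - 1] += up;
    }
    return blended[0];
}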
Code Example #5
File: STWarp.cpp  Project: mgharbi/video_var
WarpingField<T> STWarp<T>::computeWarp() {
    if(params.verbosity >0) {
        printf("=== Computing warping field for %s, size %dx%dx%d(%d) ===\n",
            params.name.c_str(),
            dimensions[1],
            dimensions[0],
            dimensions[2],
            videoA->channelCount());
    }

    if(params.verbosity >0) {
        if(typeid(T)==typeid(float)) {
            printf("+ Single-precision computation\n");
        } else{
            printf("+ Double-precision computation\n");
        }
        if(params.bypassTimeWarp){
            printf("+ By-passing timewarp map\n");
        }
        printf("+ Regularizing lambda [%5f,%5f,%5f,%5f]\n",
                params.lambda[0],
                params.lambda[1],
                params.lambda[2],
                params.lambda[3]);
    }
    
    // Get dimensions of the pyramid levels
    vector<vector<int> > pyrSizes = getPyramidSizes();
    int nLevels = pyrSizes.size();

    // Build Pyramids
    vector<Video<stwarp_video_t>*> pyramidA(nLevels);
    vector<Video<stwarp_video_t>*> pyramidB(nLevels);
    buildPyramid(pyrSizes,pyramidA,pyramidB);

    WarpingField<T> warpField;
    if(initialWarpField) {
        warpField = *initialWarpField;
    } else {
        if(params.verbosity >0) {
            printf("+ Generating initial warp field\n");
        }
        warpField = WarpingField<T>(dimensions[0], dimensions[1], 
                dimensions[2], 3);
    }

    for (int i = nLevels-1; i >= 0 ; --i) {
        videoA = pyramidA[i];
        videoB = pyramidB[i];

        // update dimensions
        this->dimensions = videoA->dimensions();
        if(params.verbosity >0) {
            printf("+ Multiscale level %02d: %dx%dx%d (B:%d)\n", i+1,dimensions[1],
                dimensions[0],dimensions[2],videoB->frameCount());
        }

        // resample warping field
        resampleWarpingField(warpField,pyrSizes[i]);

        // computeUVW
        multiscaleIteration(warpField);

        if(params.verbosity >0) {
            printf("  x[%.4f, %.4f] ", warpField.min(0), warpField.max(0));
            printf("  y[%.4f, %.4f] ", warpField.min(1), warpField.max(1));
            printf("  t[%.4f, %.4f]\n", warpField.min(2), warpField.max(2));
        }

        // Cleanup allocated videos
        if (i != 0) {
            if( videoA != nullptr ){
                delete videoA;
                videoA = nullptr;
            }
            if( videoB != nullptr ){
                delete videoB;
                videoB = nullptr;
            }
        }
    }

    return warpField;
}
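computeWarp() follows the classic coarse-to-fine scheme: estimate the warping field on the coarsest pyramid level, then repeatedly resample the estimate and refine it against the next finer level. A generic sketch of that pattern, with refineAtScale as a hypothetical stand-in for one multiscaleIteration() and a plain 2-channel flow field instead of the STWarp warping field; the key detail is that displacement magnitudes have to be rescaled together with the field whenever it is resampled to a finer resolution:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <functional>
#include <vector>

cv::Mat coarseToFine(const std::vector<cv::Mat>& pyrA,   // level 0 = finest, last = coarsest
                     const std::vector<cv::Mat>& pyrB,
                     const std::function<void(const cv::Mat&, const cv::Mat&,
                                              cv::Mat&)>& refineAtScale)
{
    const int nLevels = (int)pyrA.size();
    // Start from a zero displacement field at the coarsest level.
    cv::Mat flow = cv::Mat::zeros(pyrA[nLevels - 1].size(), CV_32FC2);

    for (int i = nLevels - 1; i >= 0; --i) {
        // Resample the current estimate to this level's resolution and scale
        // the displacements by the same factor.
        const double sx = (double)pyrA[i].cols / flow.cols;
        const double sy = (double)pyrA[i].rows / flow.rows;
        cv::Mat up;
        cv::resize(flow, up, pyrA[i].size(), 0, 0, cv::INTER_LINEAR);
        std::vector<cv::Mat> uv;
        cv::split(up, uv);
        uv[0] *= sx;
        uv[1] *= sy;
        cv::merge(uv, flow);

        // Refine the estimate against the two inputs at this scale.
        refineAtScale(pyrA[i], pyrB[i], flow);
    }
    return flow;   // finest-level field
}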
Code Example #6
File: Main.cpp  Project: zhuyongfeng/opencv-shuda
//#define  WEB_CAM
int main ( int argc, char** argv )
{
	initLight();
    //opencv cpp style
#ifdef WEB_CAM
	cv::VideoCapture cap ( 1 ); // 0: open the default camera
								// 1: open the integrated webcam
#else
	cv::VideoCapture cap("Cropped1.avi"); //("VDark.avi");//("VTreeTrunk.avi"); //("VRotatePersp.avi");//("VMouth.avi");// ("VCars.avi"); //("VZoomIn.avi");//("VSelf.avi");//("VFootball.mkv");//( "VRectLight.avi" );
	//("VCars.avi"); //("VRotateOrtho.avi"); //("VHand.avi"); 
	//("VPerson.avi");//("VHall.avi");// // ("VZoomOut.avi");// 
#endif

    if ( !cap.isOpened() ) return -1;
	
	btl::image::semidense::CSemiDenseTrackerOrb cSDTOrb;
	btl::image::semidense::CSemiDenseTracker cSDTFast;
	cv::gpu::GpuMat cvgmColorFrame,cvgmGrayFrame,cvgmColorFrameSmall; 
	cv::Mat cvmColorFrame, cvmGrayFrame, cvmTotalFrame;
	cap >> cvmColorFrame; cvgmColorFrame.upload(cvmColorFrame);
	//resize
	const float fScale = .5f;
	cv::gpu::resize(cvgmColorFrame,cvgmColorFrameSmall,cv::Size(0,0),fScale ,fScale );	
	//to gray
	cv::gpu::cvtColor(cvgmColorFrameSmall,cvgmGrayFrame,cv::COLOR_RGB2GRAY);
	initPyramid(cvgmGrayFrame.rows, cvgmGrayFrame.cols );
	buildPyramid(cvgmGrayFrame);
	cvmTotalFrame.create(cvgmColorFrameSmall.rows*2,cvgmColorFrameSmall.cols*2,CV_8UC3);
	cv::Mat cvmROI0(cvmTotalFrame, cv::Rect(                       0,                        0, cvgmColorFrameSmall.cols, cvgmColorFrameSmall.rows));
	cv::Mat cvmROI1(cvmTotalFrame, cv::Rect(                       0, cvgmColorFrameSmall.rows, cvgmColorFrameSmall.cols, cvgmColorFrameSmall.rows));
	cv::Mat cvmROI2(cvmTotalFrame, cv::Rect(cvgmColorFrameSmall.cols, cvgmColorFrameSmall.rows, cvgmColorFrameSmall.cols, cvgmColorFrameSmall.rows));
	cv::Mat cvmROI3(cvmTotalFrame, cv::Rect(cvgmColorFrameSmall.cols,                        0, cvgmColorFrameSmall.cols, cvgmColorFrameSmall.rows));
	//copy to total frame
	cvgmColorFrameSmall.download(cvmROI0); cvgmColorFrameSmall.download(cvmROI1); cvgmColorFrameSmall.download(cvmROI2); cvgmColorFrameSmall.download(cvmROI3);

	bool bIsInitSuccessful;
	bIsInitSuccessful = cSDTFast.init( _acvgmShrPtrPyrBWs );
	bIsInitSuccessful = cSDTOrb.init( _acvgmShrPtrPyrBWs );

	while(!bIsInitSuccessful){
		cap >> cvmColorFrame; cvgmColorFrame.upload(cvmColorFrame);
		//resize
		cv::gpu::resize(cvgmColorFrame,cvgmColorFrameSmall,cv::Size(0,0),fScale ,fScale );
		//to gray
		cv::gpu::cvtColor(cvgmColorFrameSmall,cvgmGrayFrame,cv::COLOR_RGB2GRAY);
		//copy into total frame	
		cvgmColorFrameSmall.download(cvmROI0); cvgmColorFrameSmall.download(cvmROI1); cvgmColorFrameSmall.download(cvmROI2); cvgmColorFrameSmall.download(cvmROI3);
		bIsInitSuccessful = cSDTOrb.init( _acvgmShrPtrPyrBWs );
		bIsInitSuccessful = cSDTFast.init( _acvgmShrPtrPyrBWs );
	}

    cv::namedWindow ( "Tracker", 1 );
	bool bStart = false;
	unsigned int uIdx = 0;
    for ( ;;uIdx++ ){
		double t = (double)cv::getTickCount();
		int nKey = cv::waitKey( 0 ) ;
		if ( nKey == 'a' ){
			bStart = true;
		}
		else if ( nKey == 'q'){
			break;
		}
		

		imshow ( "Tracker", cvmTotalFrame );
		if(!bStart) continue;
		//load a new frame
 		cap >> cvmColorFrame; 

		if (cvmColorFrame.empty()) {
			cap.set(CV_CAP_PROP_POS_AVI_RATIO,0);//replay at the end of the video
			cap >> cvmColorFrame; cvgmColorFrame.upload(cvmColorFrame);
			//resize
			cv::gpu::resize(cvgmColorFrame,cvgmColorFrameSmall,cv::Size(0,0),fScale ,fScale );
			//to gray
			cv::gpu::cvtColor(cvgmColorFrameSmall,cvgmGrayFrame,cv::COLOR_RGB2GRAY);
			buildPyramid(cvgmGrayFrame);
			//copy into total frame	
			cvgmColorFrameSmall.download(cvmROI0); cvgmColorFrameSmall.download(cvmROI1); cvgmColorFrameSmall.download(cvmROI2); cvgmColorFrameSmall.download(cvmROI3);
			cSDTOrb.init( _acvgmShrPtrPyrBWs );
			cSDTFast.init( _acvgmShrPtrPyrBWs );
			//get second frame
			cap >> cvmColorFrame; cvgmColorFrame.upload(cvmColorFrame);
			//resize
			cv::gpu::resize(cvgmColorFrame,cvgmColorFrameSmall,cv::Size(0,0),fScale ,fScale );
			//to gray
			cv::gpu::cvtColor(cvgmColorFrameSmall,cvgmGrayFrame,cv::COLOR_RGB2GRAY);
			//copy into total frame	
			cvgmColorFrameSmall.download(cvmROI0); cvgmColorFrameSmall.download(cvmROI1); cvgmColorFrameSmall.download(cvmROI2); cvgmColorFrameSmall.download(cvmROI3);
		}else{