Example #1
int main(int argc, char** argv) {
	vector<Point> a,b;
	
	Mat src = imread("deer-18.png");
	if (src.empty()) {
		cerr << "can't read image" << endl; exit(0);
	}
	GetCurveForImage(src, a, false);
	ResampleCurve(a, a, 200, false);
	
	vector<Point2d> a_p2d;
	ConvertCurve(a, a_p2d);
	
	//create the target curve
	{
		//rotate and scale
		Scalar meanpt = mean(a);
		Mat_<double> trans_to = getRotationMatrix2D(Point2f(meanpt[0],meanpt[1]), 5, 0.65);
		
		//translate
		trans_to(0,2) += 40;
		trans_to(1,2) += 40;
	
		vector<Point2d> b_p2d;
		cv::transform(a_p2d,b_p2d,trans_to);
		
		// everybody in the house - make some noise!
		cv::RNG rng(27628);
		for (int i=0; i<b_p2d.size(); i++) {
			b_p2d[i].x += (rng.uniform(0.0,1.0) - 0.5) * 20;
			b_p2d[i].y += (rng.uniform(0.0,1.0) - 0.5) * 20;
		}
		
		ConvertCurve(b_p2d, b);
		
		// occlude
		vector<Point> b_occ;
		for (int i=50; i<130; i++) {
			b_occ.push_back(b[i]);
		}
		ResampleCurve(b_occ, b, 200, true);
	}
	
	//Compare curves
	int a_len,a_off,b_len,b_off;
	double db_compare_score;
	CompareCurvesUsingSignatureDB(a, 
								  b,
								  a_len,
								  a_off,
								  b_len,
								  b_off,
								  db_compare_score
								  );

	//Get matched subsets of curves
	vector<Point> a_subset(a.begin() + a_off, a.begin() + a_off + a_len);
	vector<Point> b_subset(b.begin() + b_off, b.begin() + b_off + b_len);
	
	//Normalize to equal length
	ResampleCurve(a_subset, a_subset, 200, true);
	ResampleCurve(b_subset, b_subset, 200, true);
		
	//Visualize the original and target
	Mat outout(src.size(),CV_8UC3,Scalar::all(0));
	{
		//draw small original
		vector<Point2d> tmp_curve;
		cv::transform(a_p2d,tmp_curve,getRotationMatrix2D(Point2f(0,0),0,0.2));
		Mat tmp_curve_m(tmp_curve); tmp_curve_m += Scalar(25,0);
		drawOpenCurve(outout, tmp_curve, Scalar(255), 1);
		
		//draw small matched subset of original
		ConvertCurve(a_subset, tmp_curve);
		cv::transform(tmp_curve,tmp_curve,getRotationMatrix2D(Point2f(0,0),0,0.2));
		Mat tmp_curve_m1(tmp_curve); tmp_curve_m1 += Scalar(25,0);
		drawOpenCurve(outout, tmp_curve, Scalar(255,255), 2);

		//draw small target
		ConvertCurve(b, tmp_curve);
		cv::transform(tmp_curve,tmp_curve,getRotationMatrix2D(Point2f(0,0),0,0.2));
		Mat tmp_curve_m2(tmp_curve); tmp_curve_m2 += Scalar(outout.cols - 150,0);
		drawOpenCurve(outout, tmp_curve, Scalar(255,0,255), 1);

		//draw big target
		drawOpenCurve(outout, b, Scalar(0,0,255), 1);
		//draw big matched subset of target
		drawOpenCurve(outout, b_subset, Scalar(0,255,255), 1);
	}
	
	
	//Prepare the curves for finding the transformation
	vector<Point2f> seq_a_32f,seq_b_32f,seq_a_32f_,seq_b_32f_;

	ConvertCurve(a_subset, seq_a_32f_);
	ConvertCurve(b_subset, seq_b_32f_);
	
	assert(seq_a_32f_.size() == seq_b_32f_.size());
	
	seq_a_32f.clear(); seq_b_32f.clear();
	for (int i=0; i<seq_a_32f_.size(); i++) {
//		if(i%2 == 0) { // you can use only part of the points to find the transformation
			seq_a_32f.push_back(seq_a_32f_[i]);
			seq_b_32f.push_back(seq_b_32f_[i]);
//		}
	}
	assert(seq_a_32f.size() == seq_b_32f.size()); //just making sure
	
	vector<Point2d> seq_a_trans(a.size());
	
	//Find the fitting transformation
	//	Mat affineT = estimateRigidTransform(seq_a_32f,seq_b_32f,false); //may wanna use Affine here..
	Mat trans = Find2DRigidTransform(seq_a_32f, seq_b_32f);
	cout << trans;
	cv::transform(a_p2d,seq_a_trans,trans);
	
	//draw the matching result: the complete original curve mapped onto the target
	drawOpenCurve(outout, seq_a_trans, Scalar(0,255,0), 2);
	
	
	//May want to visualize point-by-point matching
//	cv::transform(seq_a_32f,seq_a_32f,trans);
//	for (int i=0; i<seq_a_32f.size(); i++) {
//		line(outout, seq_a_32f[i], seq_b_32f[i], Scalar(0,0,255), 1);
//	}
	
	imshow("outout", outout);
	
	waitKey();
	
}
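
Find2DRigidTransform is not shown in this listing; below is a minimal sketch of what such a helper could look like, assuming a Kabsch-style least-squares rigid fit (rotation + translation; the library's real version may also estimate scale, since the target above was scaled by 0.65). The name and signature follow the call above; the body is illustrative only.

// Hypothetical Find2DRigidTransform: least-squares 2D rigid fit (Kabsch).
// Returns a 2x3 double matrix usable with cv::transform.
Mat Find2DRigidTransform(const vector<Point2f>& a, const vector<Point2f>& b)
{
	assert(a.size() == b.size() && !a.empty());

	// centroids of the two point sets
	Point2f ca(0,0), cb(0,0);
	for (int i=0; i<a.size(); i++) { ca += a[i]; cb += b[i]; }
	ca *= 1.0f/a.size(); cb *= 1.0f/b.size();

	// 2x2 cross-covariance of the centered points
	Mat_<double> H = Mat_<double>::zeros(2,2);
	for (int i=0; i<a.size(); i++) {
		Point2f pa = a[i] - ca, pb = b[i] - cb;
		H(0,0) += pa.x*pb.x; H(0,1) += pa.x*pb.y;
		H(1,0) += pa.y*pb.x; H(1,1) += pa.y*pb.y;
	}

	// optimal rotation from the SVD of H, guarding against a reflection
	SVD svd(H);
	Mat_<double> R = svd.vt.t() * svd.u.t();
	if (determinant(R) < 0) {
		Mat_<double> D = Mat_<double>::eye(2,2); D(1,1) = -1;
		R = svd.vt.t() * D * svd.u.t();
	}

	// assemble [R | t] with t = cb - R*ca
	Mat_<double> T(2,3);
	T(0,0)=R(0,0); T(0,1)=R(0,1); T(0,2) = cb.x - (R(0,0)*ca.x + R(0,1)*ca.y);
	T(1,0)=R(1,0); T(1,1)=R(1,1); T(1,2) = cb.y - (R(1,0)*ca.x + R(1,1)*ca.y);
	return T;
}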
Example #2
/*
 * To work with Kinect or XtionPRO the user must install OpenNI library and PrimeSensorModule for OpenNI and
 * configure OpenCV with WITH_OPENNI flag is ON (using CMake).
 */
int main( int argc, char* argv[] )
{   
    time_t start = time(0);
    time_t start_frame_count;
    // parseCommandLine is disabled below, so give the flags sane defaults here
    bool isColorizeDisp = false, isFixedMaxDisp = false;
    int imageMode = 0;
    bool retrievedImageFlags[5] = { true, true, true, true, true };
    string filename;
    bool isVideoReading = false;
    //parseCommandLine( argc, argv, isColorizeDisp, isFixedMaxDisp, imageMode, retrievedImageFlags, filename, isVideoReading );

    cout << "Device opening ..." << endl;
    cout << CV_CAP_OPENNI <<endl;
    VideoCapture capture;
    if( isVideoReading )
        capture.open( filename );
    else
        capture.open(CV_CAP_OPENNI);

    cout << "done." << endl;

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    if( !isVideoReading )
    {
        bool modeRes=false;
        switch ( imageMode )
        {
            case 0:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
                break;
            case 1:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ );
                break;
            case 2:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ );
                break;
                //The following modes are only supported by the Xtion Pro Live
            case 3:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ );
                break;
            case 4:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ );
                break;
            default:
                CV_Error( CV_StsBadArg, "Unsupported image mode property.\n");
        }
        if (!modeRes)
            cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
    }
    if(capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0) capture.set(CV_CAP_PROP_OPENNI_REGISTRATION,1);
    // Print some available device settings.
    cout << "\nDepth generator output mode:" << endl <<
            "FRAME_WIDTH      " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl <<
            "FRAME_HEIGHT     " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
            "FRAME_MAX_DEPTH  " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
            "FPS              " << capture.get( CV_CAP_PROP_FPS ) << endl <<
            "REGISTRATION     " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl;
    if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
    {
        cout <<
            "\nImage generator output mode:" << endl <<
            "FRAME_WIDTH   " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl <<
            "FRAME_HEIGHT  " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
            "FPS           " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
    }
    else
    {
        cout << "\nDevice doesn't contain image generator." << endl;
        if (!retrievedImageFlags[0] && !retrievedImageFlags[1] && !retrievedImageFlags[2])
            return 0;
    }
    if( !face_cascade.load( cascade_name[0] ) )
    {
        printf("--(!)Error loading\n"); return -1;
    }
    if( !eyes_cascade.load( cascade_name[1] ) )
    {
        printf("--(!)Error loading\n"); return -1;
    }
    //printf("Entering for\n");

    int last_printed = 0;
    int WAIT_SEC = 10;

    for(;;)
    {
        Mat depthMap;
        Point image_center;
        Mat Display_image;
        Mat validDepthMap;
        Mat disparityMap;
        Mat bgrImage;
        Mat grayImage;
        Mat show;
        double seconds_since_start = difftime( time(0), start);

        if( !capture.grab() )
        {
            cout << "Can not grab images." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) )
            {
                const float scaleFactor = 0.05f;
                depthMap.convertTo( show, CV_8UC1, scaleFactor );
                //imshow( "depth map", show );
            }

            if( capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ) ) {
                
            // Align nose with the circle


                int rad = 40;
               	int row_rgb = bgrImage.rows;
            	int col_rgb = bgrImage.cols;
                image_center.y = row_rgb/2 - 100;
                image_center.x = col_rgb/2;
                Display_image = bgrImage.clone();
                // Copying bgrImage so that circle is shown temporarily only
                if( seconds_since_start < WAIT_SEC ) {
                    circle( Display_image, image_center, rad, Scalar( 255, 0, 0 ), 3, 8, 0 );
                    imshow( "rgb image", Display_image );
                }

                // Wait for a key Press
                //std::cin.ignore();
                // Now it will capture Golden data 
            }

        /*    if( retrievedImageFlags[4] && capture.retrieve( grayImage, CV_CAP_OPENNI_GRAY_IMAGE ) )
                imshow( "gray image", grayImage );*/

            int seconds = int(seconds_since_start);
            if( last_printed < seconds && seconds <= WAIT_SEC )
            {
                printf(" Capturing Golden Face template in %d seconds ...\n\n", WAIT_SEC - seconds);
                last_printed = seconds;
            }
            
            if( !depthMap.empty() && !bgrImage.empty() && (seconds_since_start > WAIT_SEC) )
                detectAndDisplay(bgrImage, depthMap, argc, argv);

            //writeMatToFile("depth.txt", depthMap);
        }

        if( waitKey( 30 ) >= 0 )  {
            seconds_since_start = difftime( time(0), start) - WAIT_SEC;
            cout << endl << endl << " FPS is : " << ( (double)(filenumber - 1) )/seconds_since_start << endl; 
            cout << " Viola Jones Count is : " << viola_jones_count <<  " Total file count is : " << filenumber - 1 << endl; 
            cout << " Predictor Accuracy is : " << ( (double)(filenumber - viola_jones_count - 1 ) ) * 100 / (double) (filenumber - 1) << endl; 
            break;
        }
    }
    Trans_dump.close();
    return 0;
}
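
writeMatToFile is only referenced in a commented-out line above; a minimal sketch of a matching helper, assuming the OpenNI depth frames (CV_16UC1 for CV_CAP_OPENNI_DEPTH_MAP) are dumped as whitespace-separated text, one image row per line:

// Hypothetical writeMatToFile, matching the commented-out call above.
#include <fstream>

void writeMatToFile(const char* filename, const Mat& m)
{
    std::ofstream out(filename);
    for (int y = 0; y < m.rows; y++)
    {
        for (int x = 0; x < m.cols; x++)
            out << m.at<unsigned short>(y, x) << ' '; // depth in millimetres
        out << '\n';
    }
}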
Example #3
static void
StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }

    bool displayCorners = false;//true;
    const int maxScale = 2;
    const float squareSize = 1.f;  // Set this to your actual square size
    // ARRAY AND VECTOR STORAGE:

    vector<vector<Point2f> > imagePoints[2];
    vector<vector<Point3f> > objectPoints;
    Size imageSize;

    int i, j, k, nimages = (int)imagelist.size()/2;

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;

    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);
                found = findChessboardCorners(timg, boardSize, corners,
                    CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
                if( found )
                {
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, COLOR_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                double sf = 640./MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if( !found )
                break;
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                         TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
                                      30, 0.01));
        }
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);

    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ )
            for( k = 0; k < boardSize.width; k++ )
                objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
    }

    cout << "Running stereo calibration ...\n";

    Mat cameraMatrix[2], distCoeffs[2];
    cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    Mat R, T, E, F;

    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    CALIB_FIX_ASPECT_RATIO +
                    CALIB_ZERO_TANGENT_DIST +
                    CALIB_SAME_FOCAL_LENGTH +
                    CALIB_RATIONAL_MODEL +
                    CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
                    TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5) );
    cout << "done with RMS error=" << rms << endl;

// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average reprojection err = " <<  err/npoints << endl;

    // save intrinsic parameters
    FileStorage fs("../data/intrinsics.yml", FileStorage::WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
            "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];

    stereoRectify(cameraMatrix[0], distCoeffs[0],
                  cameraMatrix[1], distCoeffs[1],
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);

    fs.open("extrinsics.yml", FileStorage::WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the extrinsic parameters\n";

    // OpenCV can handle left-right
    // or up-down camera arrangements
    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));

// COMPUTE AND DISPLAY RECTIFICATION
    if( !showRectified )
        return;

    Mat rmap[2][2];
// IF BY CALIBRATED (BOUGUET'S METHOD)
    if( useCalibrated )
    {
        // we already computed everything
    }
// OR ELSE HARTLEY'S METHOD
    else
 // use intrinsic parameters of each camera, but
 // compute the rectification transformation directly
 // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for( k = 0; k < 2; k++ )
        {
            for( i = 0; i < nimages; i++ )
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);

        R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
        R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
    }

    //Precompute maps for cv::remap()
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        sf = 600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);
    }
    else
    {
        sf = 300./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);
    }

    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
            cvtColor(rimg, cimg, COLOR_GRAY2BGR);
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
            if( useCalibrated )
            {
                Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
            }
        }

        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 )
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 )
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' )
            break;
    }
}
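
StereoCalib expects imagelist to hold alternating left/right file names. A minimal caller sketch, modeled on the readStringList helper from OpenCV's stereo_calib sample (the XML file name and 9x6 board size are illustrative):

// Read a flat list of image file names from an XML/YML sequence node.
static bool readStringList( const string& filename, vector<string>& l )
{
    l.resize(0);
    FileStorage fs(filename, FileStorage::READ);
    if( !fs.isOpened() )
        return false;
    FileNode n = fs.getFirstTopLevelNode();
    if( n.type() != FileNode::SEQ )
        return false;
    for( FileNodeIterator it = n.begin(); it != n.end(); ++it )
        l.push_back((string)*it);
    return true;
}

int main()
{
    vector<string> imagelist;
    if( readStringList("stereo_calib.xml", imagelist) )   // hypothetical list file
        StereoCalib(imagelist, Size(9, 6), true, true);   // 9x6 inner corners assumed
    return 0;
}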
Example #4
int main( int argc, char** argv )
{
	VideoCapture cap;
	TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);
	Size subPixWinSize(10,10), winSize(31,31);

	const int MAX_COUNT = 500;
	bool needToInit = false;
	bool nightMode = false;

	cap.open(0);

	if( !cap.isOpened() )
	{
		cout << "Could not initialize capturing...\n";
		return 0;
	}

	help();

	namedWindow( "PlaneTracking", 1 );
	setMouseCallback( "PlaneTracking", onMouse, 0 );

	Mat gray, prevGray, image;

	for(;;)
	{
		Mat frame;
		cap >> frame;
		if( frame.empty() )
			break;

		frame.copyTo(image);
		cvtColor(image, gray, CV_BGR2GRAY); 

		if( nightMode )
			image = Scalar::all(0);

		if( needToInit )
		{
			// automatic initialization
			/*goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
			cornerSubPix(gray, points[1], subPixWinSize, Size(-1,-1), termcrit);*/

			initTrackingPoints(frame);
			addRemovePt = false;
		}
		else if( !trackPtsPre.empty() )
		{
			vector<uchar> status;
			vector<float> err;
			if(prevGray.empty())
				gray.copyTo(prevGray);
			calcOpticalFlowPyrLK(prevGray, gray, trackPtsPre, trackPtsCur, status, err, winSize,
				3, termcrit, 0, 0, 0.001);

			size_t i, k;
			for( i = k = 0; i < trackPtsCur.size(); i++ )
			{
				if( addRemovePt )
				{
					if( norm(pt - trackPtsCur[i]) <= 5 )
					{
						addRemovePt = false;
						continue;
					}
				}

				if( !status[i] )
					continue;

				trackPtsCur[k++] = trackPtsCur[i];
				circle( image, trackPtsCur[i], 3, Scalar(0,255,0), -1, 8);
			}
			trackPtsCur.resize(k);
		}

		if( addRemovePt && trackPtsCur.size() < (size_t)MAX_COUNT )
		{
			vector<Point2f> tmp;
			tmp.push_back(pt);
			cornerSubPix( gray, tmp, winSize, Size(-1,-1), termcrit);
			trackPtsCur.push_back(tmp[0]);
			addRemovePt = false;
		}

		needToInit = false;
		imshow("LK Demo", image);

		char c = (char)waitKey(10);
		if( c == 27 )
			break;
		switch( c )
		{
		case 'r':
			needToInit = true;
			break;
		case 'c':
			trackPtsCur.clear();
			break;
		case 'n':
			nightMode = !nightMode;
			break;
		default:
			;
		}

		std::swap(trackPtsCur, trackPtsPre);
		swap(prevGray, gray);
	}

	return 0;
}
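
initTrackingPoints, onMouse and the point buffers live outside this listing; below is a minimal sketch following the goodFeaturesToTrack initialization left commented out above. The global names match those used in main(); everything else is an assumption.

// Assumed globals, matching the names used in main()
vector<Point2f> trackPtsPre, trackPtsCur;
Point2f pt;
bool addRemovePt = false;

// Hypothetical initTrackingPoints: seed the tracker with strong corners.
void initTrackingPoints(const Mat& frame)
{
	Mat gray;
	cvtColor(frame, gray, CV_BGR2GRAY);
	goodFeaturesToTrack(gray, trackPtsCur, 500, 0.01, 10, Mat(), 3, false, 0.04);
	cornerSubPix(gray, trackPtsCur, Size(10,10), Size(-1,-1),
	             TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
}

// Hypothetical onMouse: mark a clicked point for insertion on the next frame.
static void onMouse(int event, int x, int y, int, void*)
{
	if (event == CV_EVENT_LBUTTONDOWN)
	{
		pt = Point2f((float)x, (float)y);
		addRemovePt = true;
	}
}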
Example #5
int main(int argc, char** argv)
{
  // check number of arguments (webcam device number and frame rate are both required)
  if(argc < 3)
	{
		ROS_ERROR("Usage: image_publisher <webcam device number> <fps>");
		return 1;
	}

  ros::init(argc, argv, "image_publisher");
  ros::NodeHandle nh;
  image_transport::ImageTransport it(nh);
  image_transport::Publisher pub = it.advertise("camera/image", 1);

	// convert webcam device number to an integer
  std::istringstream video_sourceCmd(argv[1]);
  int video_source;
  // check that the webcam device number is numeric
  if(!(video_sourceCmd >> video_source)) 
	{
		ROS_ERROR("Webcam device number is not a number");
		return 1;
	}

  VideoCapture cap(video_source);
	
  // check that the webcam device could be opened
  if(!cap.isOpened())
	{
		ROS_ERROR("Webcam device nummer invalid");
		return 1;
	}
  Mat frame;
  sensor_msgs::ImagePtr msg;

	std::istringstream fpsCmd(argv[2]);
	double fps;
	// check that the frame rate is numeric
	if(!(fpsCmd >> fps)) 
	{
		ROS_ERROR("Frame rate is not a number");
		return 1;
	}

  ros::Rate loop_rate(fps);
  while (nh.ok()) 
	{
    cap >> frame;
    // check for image content
    if(!frame.empty()) 
		{
			//convert OpenCV image to ROS image
      msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", frame).toImageMsg();
			//publish image
      pub.publish(msg);
			ROS_INFO("Publish frame");
      waitKey(1);
    }

    ros::spinOnce();
    loop_rate.sleep();
  }
}
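
A minimal counterpart node that consumes the camera/image topic published above, following the standard image_transport/cv_bridge subscriber pattern (node and window names illustrative):

// Hypothetical subscriber for the topic published above.
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/highgui/highgui.hpp>

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  // convert the ROS image back to an OpenCV BGR image and display it
  cv::imshow("view", cv_bridge::toCvShare(msg, "bgr8")->image);
  cv::waitKey(1);
}

int main(int argc, char** argv)
{
  ros::init(argc, argv, "image_listener");
  ros::NodeHandle nh;
  image_transport::ImageTransport it(nh);
  image_transport::Subscriber sub = it.subscribe("camera/image", 1, imageCallback);
  ros::spin();
}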
Example #6
int main(int argc, char ** argv)
{

	string gauss = "Gaussino";
	string canny = "Canny";
	string hough = "Hough";
	string binarizar = "Binarizar";
	string Otsu = "Otsu";
	string image_name = "";
	int number;
	Point min, max, start;

	ofstream myfile;

	myfile.open("data.txt");

	myfile << "ESCREVE QUALQUER COISA\n";
	

	clock_t t1, t2, t3, t4;
	double threshold1, threshold2, thres, minLength, maxGap;
	bool f1, f2, f3, f4, f5, f6, f7, f8, f9;
	string Result;
	ostringstream convert;
	//int i;
	float temp;

	//for (i = 1;  i <= 6; i++){

		//number = i;
		//convert << number;
		//Result = convert.str();
		//image_name = "a" + Result + ".JPG";
		image_name = "a2.JPG";
		//number++;
		//cout << number << endl;
		cout << image_name;


		myfile << image_name;
		myfile << "\n";

		t1 = clock();
		f1 = false;
		f2 = true;
		f3 = false;
		f4 = false;
		f5 = false;
		f6 = true;
		f7 = true;
		if (f7 == true){
			threshold1 = 10;
			threshold2 = 19;
		}
		f8 = false;
		f9 = true;
		if (f9 == true){
			thres = 10;// 40
			minLength = 20; //50
			maxGap = 30; //80

			/*
			CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );

			if ( !capture ) {
			fprintf( stderr, "ERROR: capture is NULL \n" );
			getchar();
			return -1;
			}
			string original = "original.jpg";
			string foto ="img";

			IplImage* frame = cvQueryFrame( capture );
			Mat img(frame);
			Mat I, I1, imge;
			cvtColor(img,imge,CV_RGB2GRAY);
			imge.convertTo(I, CV_8U);
			equalizeHist(I,I1);
			Mat aux = I1;
			savePictures(I1, original, foto);

			*/

			// read the file and load the image into a matrix
			// the image has a single color channel, hence the CV_LOAD_IMAGE_GRAYSCALE flag
			Mat lara = imread("lara.JPG", CV_LOAD_IMAGE_GRAYSCALE);
			Mat I = imread(image_name, CV_LOAD_IMAGE_GRAYSCALE);
			if (I.empty())
				return -1;
			resize(I, I, lara.size(), 1.0, 1.0, INTER_LINEAR);
			Mat I1;
			//Mat aux = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); 
			equalizeHist(I, I1);


			Mat aux, original;

			aux = I1;

			//ShowImage(I, I1);
			// check that the image was loaded and allocated successfully
			if (I1.empty())
				return -1;

			// Size holds the image width and height, as returned by .size()
			//imSize = I1.size();

			// create a matrix of size imSize, 8 bits and 1 channel

			Mat I2 = Mat::zeros(I1.size(), CV_8UC1);


			if (f2 == true) {
				t2 = clock();
				for (int i = 1; i < MAX_KERNEL_LENGTH; i = i + 2)
					GaussianBlur(I1, I1, Size(i, i), 0, 0, BORDER_DEFAULT);
				//ShowImage(aux, I1);
				cout << "Guassiano tempo : ";
				temp = tempo(t2);
				savePictures(I1, image_name, gauss);
				myfile << "Gauss: ";
				myfile << temp;
				myfile << "\n";

			}

			if (f1 == true){
				t2 = clock();
				binarizacao(I1, 125);
				//ShowImage(aux, I1);
				cout << "binarizacao : ";
				temp = tempo(t2);
				savePictures(I1, image_name, binarizar);
				myfile << "Binarizacao: ";
				myfile << temp;
				myfile << "\n";


			}




			if (f3 == true){
				t2 = clock();
				inversao(I1);
				cout << "inversao : ";
				tempo(t2);

			}


			if (f4 == true){
				adaptiveThreshold(I1, I1, 255, ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 7, 0);
			}


			if (f5 == true)
				Laplacian(I1, I1, 125, 1, 1, 0, BORDER_DEFAULT);



			if (f7 == true){
				t2 = clock();
				Canny(I1, I2, threshold1, threshold2, 3, false);
				cout << "canny : ";
				temp = tempo(t2);
				savePictures(I2, image_name, canny);
				myfile << "Canny: " + (int)(temp * 1000);
				myfile << "\n";
			}



			if (f9 == true){
				t2 = clock();
				Hough(I2, aux, thres, minLength, maxGap);
				cout << "hough : ";
				temp = tempo(t2);
				savePictures(aux, image_name, hough);
				myfile << "Hough: ";
				myfile << temp;
				myfile << "\n";
			}

			if (f6 == true){
				t2 = clock();
				threshold_type = THRESH_BINARY;

				threshold(aux, I1, 9, max_BINARY_value, threshold_type);
				cout << "Threshold : ";
				//savePictures(aux, image_name, Otsu);
				temp = tempo(t2);
				myfile << "Threshold/OTSU: ";
				myfile << temp;
				myfile << "\n";
			}


			string name = Otsu + image_name;
			imwrite(name, aux);
			ShowImage(I1, aux);

			t2 = clock();
			max = maxPoint(aux);
			min = minPoint(aux);

			/*start.y = (max.y + min.y) / 2;
			start.x = (max.x + min.x) /2;*/

			start.x = max.x;
			start.y = max.y;

			Point end;

			end.x = start.x;
			end.y = aux.size().height;

			
			MyLine(I, start, end, image_name, 0.3);
			temp = tempo(t2);
			ShowImage(I, aux);

			myfile << "Rota: ";
			myfile << temp;
			myfile << "\n";

			temp = tempo(t1);
			cout << "Final time : ";
			myfile << "Final Time: ";
			myfile << temp;
			myfile << "\n";




			//float angle = Angle(aux, min, 5);

			//cout << angle; 

			

		}

	//}

		
		
		
		myfile.close();
		//ShowImage(aux, I1);

		//imwrite(argv[2], I2); // save image I2 to the user-supplied file in argv[2]
	//}
		return 0;
}
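
tempo, savePictures, binarizacao, inversao, Hough, MyLine, ShowImage, maxPoint/minPoint and the globals threshold_type/max_BINARY_value are defined elsewhere. A minimal sketch of tempo, assuming it prints and returns the seconds elapsed since the given clock() timestamp (which matches the "cout << ...; temp = tempo(t2);" usage above):

// Hypothetical tempo helper: elapsed seconds since t, printed and returned.
float tempo(clock_t t)
{
	float elapsed = (float)(clock() - t) / CLOCKS_PER_SEC;
	cout << elapsed << " s" << endl;
	return elapsed;
}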
Example #7
int Permute::forward(const Mat& bottom_blob, Mat& top_blob) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    // order_type
    // 0 = w h c
    // 1 = h w c
    // 2 = w c h
    // 3 = c w h
    // 4 = h c w
    // 5 = c h w

    if (order_type == 0)
    {
        top_blob = bottom_blob;
    }
    else if (order_type == 1)
    {
        top_blob.create(h, w, channels);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<channels; q++)
        {
            const float* ptr = bottom_blob.channel(q);
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < w; i++)
            {
                for (int j = 0; j < h; j++)
                {
                    outptr[i*h + j] = ptr[j*w + i];
                }
            }
        }
    }
    else if (order_type == 2)
    {
        top_blob.create(w, channels, h);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<h; q++)
        {
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < channels; i++)
            {
                const float* ptr = bottom_blob.channel(i).row(q);

                for (int j = 0; j < w; j++)
                {
                    outptr[i*w + j] = ptr[j];
                }
            }
        }
    }
    else if (order_type == 3)
    {
        top_blob.create(channels, w, h);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<h; q++)
        {
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < w; i++)
            {
                for (int j = 0; j < channels; j++)
                {
                    const float* ptr = bottom_blob.channel(j).row(q);

                    outptr[i*channels + j] = ptr[i];
                }
            }
        }
    }
    else if (order_type == 4)
    {
        top_blob.create(h, channels, w);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<w; q++)
        {
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < channels; i++)
            {
                const float* ptr = bottom_blob.channel(i);

                for (int j = 0; j < h; j++)
                {
                    outptr[i*h + j] = ptr[j*w + q]; // output row stride is h, the new width
                }
            }
        }
    }
    else if (order_type == 5)
    {
        top_blob.create(channels, h, w);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for
        for (int q=0; q<w; q++)
        {
            float* outptr = top_blob.channel(q);

            for (int i = 0; i < h; i++)
            {
                for (int j = 0; j < channels; j++)
                {
                    const float* ptr = bottom_blob.channel(j);

                    outptr[i*channels + j] = ptr[i*w + q];
                }
            }
        }
    }

    return 0;
}
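
The order_type 1 branch is just a per-channel transpose of the w x h plane. A small self-contained sketch of the same index mapping on a plain float array, to make the stride arithmetic concrete:

// Standalone illustration of the order_type 1 mapping above:
// out[i*h + j] = in[j*w + i] transposes a w x h plane into an h x w plane.
#include <cstdio>

int main()
{
    const int w = 3, h = 2;
    float in[w*h] = { 0, 1, 2,
                      3, 4, 5 };      // 2 rows of width 3
    float out[w*h];
    for (int i = 0; i < w; i++)
        for (int j = 0; j < h; j++)
            out[i*h + j] = in[j*w + i];
    for (int i = 0; i < w; i++)       // prints: 0 3 / 1 4 / 2 5
        printf("%g %g\n", out[i*h + 0], out[i*h + 1]);
    return 0;
}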
Example #8
int main(int argc, char* argv[])
{
	/* Tserial *com;
   com = new Tserial();
   com->connect("COM11", 9600, spNONE);                            //check com port		B
   cvWaitKey(5000);
	
	*/
	    
	
	ofstream outfile;
	outfile.open("currentangle.txt");
	port_initialize("/dev/ttyACM1","9600");						//D
	cout<<"connected"<<endl;
	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	//matrix storage for HSV image
	Mat HSV;
	//matrix storage for binary threshold image
	Mat threshold;
	//x and y values for the location of the object
	int x=0, y=0;
	int gradient1,gradient2,mac,sac,pac,zac;
	float slope,angle;
	
	//create slider bars for HSV filtering	//createTrackbars();
	//video capture object to acquire webcam feed
	VideoCapture capture(argv[1]);
	//open capture object at location zero (default location for webcam)
	//capture.open(0);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while(1){
		//store image to matrix
		capture.read(cameraFeed);
		//convert frame from BGR to HSV colorspace
		cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
		//filter HSV image between values and store filtered image to
		//threshold matrix
		inRange(HSV,Scalar(H_MIN,S_MIN,V_MIN),Scalar(H_MAX,S_MAX,V_MAX),threshold);
		//show frames 
		
 Mat src = threshold;
 if(src.empty())
 {
     return -1;
 }

 Mat dst, cdst;
 Canny(src, dst, 100, 300, 3);    			//PARAMETER A  : HIGHER MUST BE 3 TIMES THE LOWER, SO LOWER ONLY NEED TO BE SET
 cvtColor(dst, cdst, CV_GRAY2BGR);

 #if 0
  vector<Vec2f> lines;
  HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0 );

  for( size_t i = 0; i < lines.size(); i++ )
  {
     float rho = lines[i][0], theta = lines[i][1];
     Point pt1, pt2;
     double a = cos(theta), b = sin(theta);
     double x0 = a*rho, y0 = b*rho;
     pt1.x = cvRound(x0 + 1000*(-b));
     pt1.y = cvRound(y0 + 1000*(a));
     pt2.x = cvRound(x0 - 1000*(-b));
     pt2.y = cvRound(y0 - 1000*(a));
     line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
  }
 #else
  vector<Vec4i> lines;
  HoughLinesP(dst, lines, 1, CV_PI/180, 85, 100, 100 );		//PARAMETER B:NO. OF INTERSECTIONS-85BESTFORNOW
  for( size_t i = 0; i < lines.size(); i++ )
  {
    
    /*if(i==2)
    {
    	system("java -jar livegraph.jar");
    }*/
    if(i==1)
    {
    	gradient1 =(lines[0][3]-lines[0][1])/(lines[0][2]-lines[0][0]) ;
 	Vec4i l = lines[i];
 	gradient2 = (lines[1][3]-lines[1][1])/(lines[1][2]-lines[1][0]) ;
 	if (gradient1==gradient2)
  	{
   		mac = (lines[0][0]+lines[1][0])/2;
   		sac = (lines[0][2]+lines[1][2])/2;
		pac = (lines[0][1]+lines[1][1])/2;
    		zac = (lines[0][3]+lines[1][3])/2;
		line(cdst, Point(mac, pac), Point(sac, zac), Scalar(0,0,255), 3, CV_AA);
		slope=(zac-pac)/(float)(sac-mac);
		// 57.2957795 = 180/pi: convert atan(slope) from radians to degrees
		if((57.2957795*atan(slope))>0){
		angle=(90-(57.2957795*atan(slope)));}
		else
		{
			angle=(57.2957795*atan(slope));
		}
		angleint=(int)angle;
		//Arduino_Output << (int *)angleint << std::flush;;
		
		
		
		//create a char array and store the sign and two ASCII digits in it
		char angl[4];
		if(angleint<0)
		{
			angl[0]='-';
		}
		else
		{
			angl[0]='+';
		}
		angl[1]='0'+abs(angleint)/10;
		angl[2]='0'+abs(angleint)%10;					//C
		angl[3]='\0';	// null-terminate so angl can be streamed as a C string
		//com->sendArray(angl,2);
		outfile << angl << endl;
		int temp=10;
		Arduino_Output << (char *)angl << std::flush;
		cout << angleint << endl;
		}
		
    }
    
     Vec4i l = lines[i];
    line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA);
  }

 #endif
 imshow("source", src);
 imshow("detected lines", cdst);
		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		
		char c = waitKey(30);
		if( c == 27 ) break;
	}
		

outfile.close();


	
	return 0;
}
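
port_initialize and the Arduino_Output stream are defined elsewhere (Arduino_Output is presumably a stream opened on the same device). A minimal termios-based sketch of port_initialize with this signature, under the assumption that it only opens the device and configures raw 8N1 mode; only "9600" is handled, and error handling is trimmed:

// Hypothetical port_initialize: open a serial device and set raw 8N1 mode.
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

int port_initialize(const char* device, const char* baud)
{
	(void)baud;                                   // only "9600" handled in this sketch
	int fd = open(device, O_RDWR | O_NOCTTY);
	if (fd < 0) return -1;

	termios tty;
	if (tcgetattr(fd, &tty) != 0) return -1;

	cfsetispeed(&tty, B9600);
	cfsetospeed(&tty, B9600);

	cfmakeraw(&tty);                              // raw byte-oriented I/O
	tty.c_cflag = (tty.c_cflag & ~CSIZE) | CS8;   // 8 data bits
	tty.c_cflag &= ~(PARENB | CSTOPB);            // no parity, 1 stop bit
	tty.c_cflag |= (CLOCAL | CREAD);              // ignore modem lines, enable read

	return tcsetattr(fd, TCSANOW, &tty);
}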
Example #9
/*
  Calculate occluded regions of the reference image (left image) -- regions that are occluded in the matching image
  (right image), i.e., where the forward-mapped disparity lands at a location with a larger (nearer) disparity --
  and non-occluded regions.
*/
void computeOcclusionBasedMasks( const Mat& leftDisp, const Mat& _rightDisp,
                             Mat* occludedMask, Mat* nonOccludedMask,
                             const Mat& leftUnknDispMask = Mat(), const Mat& rightUnknDispMask = Mat(),
                             float dispThresh = EVAL_DISP_THRESH )
{
    if( !occludedMask && !nonOccludedMask )
        return;
    checkDispMapsAndUnknDispMasks( leftDisp, _rightDisp, leftUnknDispMask, rightUnknDispMask );

    Mat rightDisp;
    if( _rightDisp.empty() )
    {
        if( !rightUnknDispMask.empty() )
           CV_Error( CV_StsBadArg, "rightUnknDispMask must be empty if _rightDisp is empty" );
        rightDisp.create(leftDisp.size(), CV_32FC1);
        rightDisp.setTo(Scalar::all(0) );
        for( int leftY = 0; leftY < leftDisp.rows; leftY++ )
        {
            for( int leftX = 0; leftX < leftDisp.cols; leftX++ )
            {
                if( !leftUnknDispMask.empty() && leftUnknDispMask.at<uchar>(leftY,leftX) )
                    continue;
                float leftDispVal = leftDisp.at<float>(leftY, leftX);
                int rightX = leftX - cvRound(leftDispVal), rightY = leftY;
                if( rightX >= 0)
                    rightDisp.at<float>(rightY,rightX) = max(rightDisp.at<float>(rightY,rightX), leftDispVal);
            }
        }
    }
    else
        _rightDisp.copyTo(rightDisp);

    if( occludedMask )
    {
        occludedMask->create(leftDisp.size(), CV_8UC1);
        occludedMask->setTo(Scalar::all(0) );
    }
    if( nonOccludedMask )
    {
        nonOccludedMask->create(leftDisp.size(), CV_8UC1);
        nonOccludedMask->setTo(Scalar::all(0) );
    }
    for( int leftY = 0; leftY < leftDisp.rows; leftY++ )
    {
        for( int leftX = 0; leftX < leftDisp.cols; leftX++ )
        {
            if( !leftUnknDispMask.empty() && leftUnknDispMask.at<uchar>(leftY,leftX) )
                continue;
            float leftDispVal = leftDisp.at<float>(leftY, leftX);
            int rightX = leftX - cvRound(leftDispVal), rightY = leftY;
            if( rightX < 0 && occludedMask )
                occludedMask->at<uchar>(leftY, leftX) = 255;
            else
            {
                if( !rightUnknDispMask.empty() && rightUnknDispMask.at<uchar>(rightY,rightX) )
                    continue;
                float rightDispVal = rightDisp.at<float>(rightY, rightX);
                if( rightDispVal > leftDispVal + dispThresh )
                {
                    if( occludedMask )
                        occludedMask->at<uchar>(leftY, leftX) = 255;
                }
                else
                {
                    if( nonOccludedMask )
                        nonOccludedMask->at<uchar>(leftY, leftX) = 255;
                }
            }
        }
    }
}
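
A hypothetical usage sketch: with float (CV_32FC1) disparity maps, passing an empty right disparity makes the function forward-map the left one itself, as in the branch above.

// Hypothetical caller: derive occlusion masks from a left disparity map only.
Mat leftDisp;                          // CV_32FC1 left-image disparity
// ... fill leftDisp (e.g. block-matcher output converted to float) ...
Mat occluded, nonOccluded;
computeOcclusionBasedMasks(leftDisp, Mat(), &occluded, &nonOccluded);
// occluded / nonOccluded are CV_8UC1 masks with 255 at flagged pixels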
Example #10
void CV_ChessboardDetectorTest::run_batch( const string& filename )
{
    CvTS& ts = *this->ts;
    ts.set_failed_test_info( CvTS::OK );

    ts.printf(CvTS::LOG, "\nRunning batch %s\n", filename.c_str());
//#define WRITE_POINTS 1
#ifndef WRITE_POINTS    
    double max_rough_error = 0, max_precise_error = 0;
#endif
    string folder = string(ts.get_data_path()) + "cameracalibration/";

    FileStorage fs( folder + filename, FileStorage::READ );
    FileNode board_list = fs["boards"];
        
    if( !fs.isOpened() || board_list.empty() || !board_list.isSeq() || board_list.size() % 2 != 0 )
    {
        ts.printf( CvTS::LOG, "%s can not be readed or is not valid\n", (folder + filename).c_str() );
        ts.printf( CvTS::LOG, "fs.isOpened=%d, board_list.empty=%d, board_list.isSeq=%d,board_list.size()%2=%d\n", 
            fs.isOpened(), (int)board_list.empty(), board_list.isSeq(), board_list.size()%2);
        ts.set_failed_test_info( CvTS::FAIL_MISSING_TEST_DATA );        
        return;
    }

    int progress = 0;
    int max_idx = board_list.node->data.seq->total/2;
    double sum_error = 0.0;
    int count = 0;

    for(int idx = 0; idx < max_idx; ++idx )
    {
        ts.update_context( this, idx, true );
        
        /* read the image */
        string img_file = board_list[idx * 2];                    
        Mat gray = imread( folder + img_file, 0);
                
        if( gray.empty() )
        {
            ts.printf( CvTS::LOG, "one of chessboard images can't be read: %s\n", img_file.c_str() );
            ts.set_failed_test_info( CvTS::FAIL_MISSING_TEST_DATA );
            continue;
        }

        string filename = folder + (string)board_list[idx * 2 + 1];
        Mat expected;
        {
            CvMat *u = (CvMat*)cvLoad( filename.c_str() );
            if(!u )
            {                
                ts.printf( CvTS::LOG, "one of chessboard corner files can't be read: %s\n", filename.c_str() ); 
                ts.set_failed_test_info( CvTS::FAIL_MISSING_TEST_DATA );
                continue;                
            }
            expected = Mat(u, true);
            cvReleaseMat( &u );
        }                
        size_t count_exp = static_cast<size_t>(expected.cols * expected.rows);                
        Size pattern_size = expected.size();

        vector<Point2f> v;        
        bool result = findChessboardCorners(gray, pattern_size, v, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);        
        show_points( gray, Mat(), v, pattern_size, result );
        if( !result || v.size() != count_exp )
        {
            ts.printf( CvTS::LOG, "chessboard is not found in %s\n", img_file.c_str() );
            ts.set_failed_test_info( CvTS::FAIL_INVALID_OUTPUT );
            continue;
        }

#ifndef WRITE_POINTS
        double err = calcError(v, expected);
#if 0
        if( err > rough_success_error_level )
        {
            ts.printf( CvTS::LOG, "bad accuracy of corner guesses\n" );
            ts.set_failed_test_info( CvTS::FAIL_BAD_ACCURACY );
            continue;
        }
#endif
        max_rough_error = MAX( max_rough_error, err );
#endif
        cornerSubPix( gray, v, Size(5, 5), Size(-1,-1), TermCriteria(TermCriteria::EPS|TermCriteria::MAX_ITER, 30, 0.1));        
        //find4QuadCornerSubpix(gray, v, Size(5, 5));
        show_points( gray, expected, v, pattern_size, result  );

#ifndef WRITE_POINTS
//        printf("called find4QuadCornerSubpix\n");
        err = calcError(v, expected);
        sum_error += err;
        count++;
#if 1
        if( err > precise_success_error_level )
        {
            ts.printf( CvTS::LOG, "Image %s: bad accuracy of adjusted corners %f\n", img_file.c_str(), err ); 
            ts.set_failed_test_info( CvTS::FAIL_BAD_ACCURACY );
            continue;
        }
#endif
        ts.printf(CvTS::LOG, "Error on %s is %f\n", img_file.c_str(), err);
        max_precise_error = MAX( max_precise_error, err );
#else
        Mat mat_v(pattern_size, CV_32FC2, (void*)&v[0]);
        CvMat cvmat_v = mat_v;
        cvSave( filename.c_str(), &cvmat_v );
#endif
        progress = update_progress( progress, idx, max_idx, 0 );
    }    
    
    sum_error /= count;
    ts.printf(CvTS::LOG, "Average error is %f\n", sum_error);
}
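
calcError and show_points are part of the surrounding test fixture. A minimal sketch of calcError, assuming it returns the largest distance between detected corners and the expected 2-channel float matrix (a mean distance would be an equally plausible definition):

// Hypothetical calcError: largest Euclidean distance between detected
// corners v and the expected corner positions.
double calcError(const vector<Point2f>& v, const Mat& expected)
{
    double err = 0;
    for (size_t i = 0; i < v.size(); i++)
    {
        Point2f e = expected.at<Point2f>((int)i);
        err = MAX(err, (double)norm(v[i] - e));
    }
    return err;
}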
Example #11
bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
                       const cv::Mat& image0, const cv::Mat& _depth0, const cv::Mat& validMask0,
                       const cv::Mat& image1, const cv::Mat& _depth1, const cv::Mat& validMask1,
                       const cv::Mat& cameraMatrix, float minDepth, float maxDepth, float maxDepthDiff,
                       const std::vector<int>& iterCounts, const std::vector<float>& minGradientMagnitudes,
                       int transformType )
{
    const int sobelSize = 3;
    const double sobelScale = 1./8;

    Mat depth0 = _depth0.clone(),
        depth1 = _depth1.clone();

    // check RGB-D input data
    CV_Assert( !image0.empty() );
    CV_Assert( image0.type() == CV_8UC1 );
    CV_Assert( depth0.type() == CV_32FC1 && depth0.size() == image0.size() );

    CV_Assert( image1.size() == image0.size() );
    CV_Assert( image1.type() == CV_8UC1 );
    CV_Assert( depth1.type() == CV_32FC1 && depth1.size() == image0.size() );

    // check masks
    CV_Assert( validMask0.empty() || (validMask0.type() == CV_8UC1 && validMask0.size() == image0.size()) );
    CV_Assert( validMask1.empty() || (validMask1.type() == CV_8UC1 && validMask1.size() == image0.size()) );

    // check camera params
    CV_Assert( cameraMatrix.type() == CV_32FC1 && cameraMatrix.size() == Size(3,3) );

    // other checks
    CV_Assert( iterCounts.empty() || minGradientMagnitudes.empty() ||
               minGradientMagnitudes.size() == iterCounts.size() );
    CV_Assert( initRt.empty() || (initRt.type()==CV_64FC1 && initRt.size()==Size(4,4) ) );

    vector<int> defaultIterCounts;
    vector<float> defaultMinGradMagnitudes;
    vector<int> const* iterCountsPtr = &iterCounts;
    vector<float> const* minGradientMagnitudesPtr = &minGradientMagnitudes;

    if( iterCounts.empty() || minGradientMagnitudes.empty() )
    {
        defaultIterCounts.resize(4);
        defaultIterCounts[0] = 7;
        defaultIterCounts[1] = 7;
        defaultIterCounts[2] = 7;
        defaultIterCounts[3] = 10;

        defaultMinGradMagnitudes.resize(4);
        defaultMinGradMagnitudes[0] = 12;
        defaultMinGradMagnitudes[1] = 5;
        defaultMinGradMagnitudes[2] = 3;
        defaultMinGradMagnitudes[3] = 1;

        iterCountsPtr = &defaultIterCounts;
        minGradientMagnitudesPtr = &defaultMinGradMagnitudes;
    }

    preprocessDepth( depth0, depth1, validMask0, validMask1, minDepth, maxDepth );

    vector<Mat> pyramidImage0, pyramidDepth0,
                pyramidImage1, pyramidDepth1, pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1,
                pyramidCameraMatrix;
    buildPyramids( image0, image1, depth0, depth1, cameraMatrix, sobelSize, sobelScale, *minGradientMagnitudesPtr,
                   pyramidImage0, pyramidDepth0, pyramidImage1, pyramidDepth1,
                   pyramid_dI_dx1, pyramid_dI_dy1, pyramidTexturedMask1, pyramidCameraMatrix );

    Mat resultRt = initRt.empty() ? Mat::eye(4,4,CV_64FC1) : initRt.clone();
    Mat currRt, ksi;
    for( int level = (int)iterCountsPtr->size() - 1; level >= 0; level-- )
    {
        const Mat& levelCameraMatrix = pyramidCameraMatrix[level];

        const Mat& levelImage0 = pyramidImage0[level];
        const Mat& levelDepth0 = pyramidDepth0[level];
        Mat levelCloud0;
        cvtDepth2Cloud( pyramidDepth0[level], levelCloud0, levelCameraMatrix );

        const Mat& levelImage1 = pyramidImage1[level];
        const Mat& levelDepth1 = pyramidDepth1[level];
        const Mat& level_dI_dx1 = pyramid_dI_dx1[level];
        const Mat& level_dI_dy1 = pyramid_dI_dy1[level];

        CV_Assert( level_dI_dx1.type() == CV_16S );
        CV_Assert( level_dI_dy1.type() == CV_16S );

        const double fx = levelCameraMatrix.at<double>(0,0);
        const double fy = levelCameraMatrix.at<double>(1,1);
        const double determinantThreshold = 1e-6;

        Mat corresps( levelImage0.size(), levelImage0.type() );

        // Run transformation search on current level iteratively.
        for( int iter = 0; iter < (*iterCountsPtr)[level]; iter ++ )
        {
            int correspsCount = computeCorresp( levelCameraMatrix, levelCameraMatrix.inv(), resultRt.inv(DECOMP_SVD),
                                                levelDepth0, levelDepth1, pyramidTexturedMask1[level], maxDepthDiff,
                                                corresps );

            if( correspsCount == 0 )
                break;

            bool solutionExist = computeKsi( transformType,
                                             levelImage0, levelCloud0,
                                             levelImage1, level_dI_dx1, level_dI_dy1,
                                             corresps, correspsCount,
                                             fx, fy, sobelScale, determinantThreshold,
                                             ksi );

            if( !solutionExist )
                break;

            computeProjectiveMatrix( ksi, currRt );

            resultRt = currRt * resultRt;

#if SHOW_DEBUG_IMAGES
            std::cout << "currRt " << currRt << std::endl;
            Mat warpedImage0;
            const Mat distCoeff(1,5,CV_32FC1,Scalar(0));
            warpImage<uchar>( levelImage0, levelDepth0, resultRt, levelCameraMatrix, distCoeff, warpedImage0 );

            imshow( "im0", levelImage0 );
            imshow( "wim0", warpedImage0 );
            imshow( "im1", levelImage1 );
            waitKey();
#endif
        }
    }

    Rt = resultRt;

    return !Rt.empty();
}
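
A hypothetical caller, assuming 8-bit grayscale frames with registered CV_32FC1 depth in metres and a 3x3 CV_32FC1 camera matrix; the file names, intrinsics and thresholds below are illustrative, and RIGID_BODY_MOTION stands in for the transformType constant defined alongside this function:

// Hypothetical usage: estimate the rigid motion between two RGB-D frames.
Mat gray0 = imread("rgb0.png", 0), gray1 = imread("rgb1.png", 0);
Mat depth0, depth1;        // CV_32FC1, metres, registered to the gray frames
Mat K = (Mat_<float>(3,3) << 525.f, 0.f, 319.5f,
                             0.f, 525.f, 239.5f,
                             0.f, 0.f, 1.f);
Mat Rt;
bool ok = cv::RGBDOdometry(Rt, Mat(),            // no initial transform
                           gray0, depth0, Mat(), // frame 0, no validity mask
                           gray1, depth1, Mat(), // frame 1
                           K, 0.f, 4.f, 0.07f,   // minDepth, maxDepth, maxDepthDiff
                           std::vector<int>(), std::vector<float>(), // default pyramid schedule
                           RIGID_BODY_MOTION);   // transformType, assumed constant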
Example #12
/** @function main */
int main(int argc, const char** argv) {
    CvCapture* capture;
    Mat frame;
    char filename[13] = "Mission.avi";
    //-- 1. Load the cascades
    if (!face_cascade.load(face_cascade_name)) {
        printf("--(!)Error loading\n");
        return -1;
    };
    if (!eyes_cascade.load(eyes_cascade_name)) {
        printf("--(!)Error loading\n");
        return -1;
    };

    //++++++++++++++++++++++
    printf("------------- video to image ... ----------------\n");
    // initialize a capture for the video file
    capture = cvCaptureFromAVI(filename);
    // read basic video properties
    cvQueryFrame(capture);
    int frameH = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    int frameW = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    int fps = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    int numFrames =
        (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
    printf(
        "\tvideo height : %d\n\tvideo width : %d\n\tfps : %d\n\tframe numbers : %d\n",
        frameH, frameW, fps, numFrames);
    // define and initialize variables
    int i = 0;
    IplImage* img = 0;
    char image_name[13];

    //cvNamedWindow( "mainWin", CV_WINDOW_AUTOSIZE );
    // read and display
    while (1) {

        //img = cvQueryFrame(capture); // grab one frame
        //cvShowImage( "mainWin", img ); // display it
        // char key = cvWaitKey(20);

        //sprintf(image_name, "%s%d%s", "image", ++i, ".jpg"); // name for the saved frame

        //cvSaveImage( image_name, img);   // save one frame
        frame = cvQueryFrame(capture);

        //-- 3. Apply the classifier to the frame
        if (!frame.empty()) {
            detectAndDisplay(frame);
        } else {
            printf(" --(!) No captured frame -- Break!");
            break;
        }

        int c = waitKey(10);
        if ((char) c == 'c') {
            break;
        }

        if (i == NUM_FRAME)
            break;
    }
    cvReleaseCapture(&capture);
    //+++++++++++++++++++++++++
    //-- 2. Read the video stream
//	// capture = cvCaptureFromCAM( -1 );
//	if (capture) {
//		while (true) {
//			frame = cvQueryFrame(capture);
//
//			//-- 3. Apply the classifier to the frame
//			if (!frame.empty()) {
//				detectAndDisplay(frame);
//			} else {
//				printf(" --(!) No captured frame -- Break!");
//				break;
//			}
//
//			int c = waitKey(10);
//			if ((char) c == 'c') {
//				break;
//			}
//		}
//	} else
//		printf("nothing happen");
    return 0;
}
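
detectAndDisplay, face_cascade, eyes_cascade and NUM_FRAME are defined elsewhere. A minimal sketch of detectAndDisplay following the standard OpenCV Viola-Jones tutorial (window name illustrative):

// Hypothetical detectAndDisplay, modeled on the OpenCV objdetect tutorial:
// detect faces, then eyes inside each face region, and draw the results.
void detectAndDisplay(Mat frame)
{
    std::vector<Rect> faces;
    Mat frame_gray;
    cvtColor(frame, frame_gray, CV_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    for (size_t i = 0; i < faces.size(); i++)
    {
        Point center(faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2);
        ellipse(frame, center, Size(faces[i].width/2, faces[i].height/2), 0, 0, 360,
                Scalar(255, 0, 255), 4, 8, 0);

        Mat faceROI = frame_gray(faces[i]);
        std::vector<Rect> eyes;
        eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        for (size_t j = 0; j < eyes.size(); j++)
        {
            Point eye_center(faces[i].x + eyes[j].x + eyes[j].width/2,
                             faces[i].y + eyes[j].y + eyes[j].height/2);
            int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25);
            circle(frame, eye_center, radius, Scalar(255, 0, 0), 4, 8, 0);
        }
    }
    imshow("Capture - Face detection", frame);
}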
Example #13
int main(int argc, char** argv)
{
    if(argc != 2) {
        help();
        return 1;
    }

    FILE* f = 0;
    VideoCapture cap;
    char test_file[20] = "";

    if (strcmp(argv[1], "live") != 0)
    {
        sprintf(test_file, "%s", argv[1]);
        f = fopen(test_file, "r");
        char vid[20];
        int values_read = fscanf(f, "%s\n", vid);
        CV_Assert(values_read == 1);
        cout << "Benchmarking against " << vid << endl;
        live = 0;
    }
    else
    {
        cap.open(0);
        if (!cap.isOpened())
        {
            cout << "Failed to open camera" << endl;
            return 0;
        }
        cout << "Opened camera" << endl;
        cap.set(CAP_PROP_FRAME_WIDTH, 640);
        cap.set(CAP_PROP_FRAME_HEIGHT, 480);
        cap >> frame;
    }

    HybridTrackerParams params;
    // motion model params
    params.motion_model = CvMotionModel::LOW_PASS_FILTER;
    params.low_pass_gain = 0.1f;
    // mean shift params
    params.ms_tracker_weight = 0.8f;
    params.ms_params.tracking_type = CvMeanShiftTrackerParams::HS;
    // feature tracking params
    params.ft_tracker_weight = 0.2f;
    params.ft_params.feature_type = CvFeatureTrackerParams::OPTICAL_FLOW;
    params.ft_params.window_size = 0;

    HybridTracker tracker(params);
    char img_file[20] = "seqG/0001.png";
    char img_file_num[10];
    namedWindow("Win", 1);

    setMouseCallback("Win", onMouse, 0);

    int i = 0;
    float w[4];
    for(;;)
    {
        i++;
        if (live)
        {
            cap >> frame;
            if( frame.empty() )
                break;
            frame.copyTo(image);
        }
        else
        {
            int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
            CV_Assert(values_read == 5);
            sprintf(img_file, "seqG/%04d.png", i);
            image = imread(img_file, IMREAD_COLOR);
            if (image.empty())
                break;
            selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                             cvRound(w[2]*image.cols), cvRound(w[3]*image.rows));
        }

        sprintf(img_file_num, "Frame: %d", i);
        putText(image, img_file_num, Point(10, image.rows-20), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
        if (!image.empty())
        {

            if (trackObject < 0)
            {
                tracker.newTracker(image, selection);
                trackObject = 1;
            }

            if (trackObject)
            {
                tracker.updateTracker(image);
                drawRectangle(&image, tracker.getTrackingWindow());
            }

            if (selectObject && selection.width > 0 && selection.height > 0)
            {
                Mat roi(image, selection);
                bitwise_not(roi, roi);
            }

            drawRectangle(&image, Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                                       cvRound(w[2]*image.cols), cvRound(w[3]*image.rows)));
            imshow("Win", image);

            waitKey(100);
        }
        else
            i = 0;
    }

    return 0;
}
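
The tracker's supporting globals and callbacks sit outside this listing. A minimal sketch of drawRectangle and the selection onMouse handler, assuming globals named exactly as used above (image, selection, selectObject, trackObject, live) plus an origin point for the drag:

// Assumed globals, matching the names used in the tracking loop above.
Mat frame, image;
Rect selection;
Point origin;
bool selectObject = false;
int trackObject = 0;
int live = 1;

// Hypothetical drawRectangle: outline the current tracking window.
void drawRectangle(Mat* img, Rect win)
{
    rectangle(*img, Point(win.x, win.y), Point(win.x + win.width, win.y + win.height),
              Scalar(0, 255, 0), 2, CV_AA);
}

// Hypothetical onMouse: drag out a region; a finished selection arms the tracker.
static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
        selection &= Rect(0, 0, image.cols, image.rows);
    }
    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        break;
    case CV_EVENT_LBUTTONUP:
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            trackObject = -1;   // triggers tracker.newTracker() in the main loop
        break;
    }
}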
Example #14
File: ecc.cpp    Project: 4auka/opencv
double cv::findTransformECC(InputArray templateImage,
                            InputArray inputImage,
                            InputOutputArray warpMatrix,
                            int motionType,
                            TermCriteria criteria)
{


    Mat src = templateImage.getMat(); //template image
    Mat dst = inputImage.getMat(); //input image (to be warped)
    Mat map = warpMatrix.getMat(); //warp (transformation)

    CV_Assert(!src.empty());
    CV_Assert(!dst.empty());


    if( ! (src.type()==dst.type()))
        CV_Error( CV_StsUnmatchedFormats, "Both input images must have the same data type" );

    //accept only 1-channel images
    if( src.type() != CV_8UC1 && src.type()!= CV_32FC1)
        CV_Error( CV_StsUnsupportedFormat, "Images must have 8uC1 or 32fC1 type");

    if( map.type() != CV_32FC1)
        CV_Error( CV_StsUnsupportedFormat, "warpMatrix must be single-channel floating-point matrix");

    CV_Assert (map.cols == 3);
    CV_Assert (map.rows == 2 || map.rows ==3);

    CV_Assert (motionType == MOTION_AFFINE || motionType == MOTION_HOMOGRAPHY ||
        motionType == MOTION_EUCLIDEAN || motionType == MOTION_TRANSLATION);

    if (motionType == MOTION_HOMOGRAPHY){
        CV_Assert (map.rows ==3);
    }

    CV_Assert (criteria.type & TermCriteria::COUNT || criteria.type & TermCriteria::EPS);
    const int    numberOfIterations = (criteria.type & TermCriteria::COUNT) ? criteria.maxCount : 200;
    const double termination_eps    = (criteria.type & TermCriteria::EPS)   ? criteria.epsilon  :  -1;

    int paramTemp = 6;//default: affine
    switch (motionType){
      case MOTION_TRANSLATION:
          paramTemp = 2;
          break;
      case MOTION_EUCLIDEAN:
          paramTemp = 3;
          break;
      case MOTION_HOMOGRAPHY:
          paramTemp = 8;
          break;
    }


    const int numberOfParameters = paramTemp;

    const int ws = src.cols;
    const int hs = src.rows;
    const int wd = dst.cols;
    const int hd = dst.rows;

    Mat Xcoord = Mat(1, ws, CV_32F);
    Mat Ycoord = Mat(hs, 1, CV_32F);
    Mat Xgrid = Mat(hs, ws, CV_32F);
    Mat Ygrid = Mat(hs, ws, CV_32F);

    float* XcoPtr = Xcoord.ptr<float>(0);
    float* YcoPtr = Ycoord.ptr<float>(0);
    int j;
    for (j=0; j<ws; j++)
        XcoPtr[j] = (float) j;
    for (j=0; j<hs; j++)
        YcoPtr[j] = (float) j;

    repeat(Xcoord, hs, 1, Xgrid);
    repeat(Ycoord, 1, ws, Ygrid);

    Xcoord.release();
    Ycoord.release();

    Mat templateZM    = Mat(hs, ws, CV_32F); // to store the (smoothed) zero-mean version of template
    Mat templateFloat = Mat(hs, ws, CV_32F); // to store the (smoothed) template
    Mat imageFloat    = Mat(hd, wd, CV_32F); // to store the (smoothed) input image
    Mat imageWarped   = Mat(hs, ws, CV_32F); // to store the warped zero-mean input image
    Mat allOnes       = Mat::ones(hd, wd, CV_8U); // to use for mask warping
    Mat imageMask     = Mat(hs, ws, CV_8U); // to store the final mask

    //gaussian filtering is optional
    src.convertTo(templateFloat, templateFloat.type());
    GaussianBlur(templateFloat, templateFloat, Size(5, 5), 0, 0);//is in-place filtering slower?

    dst.convertTo(imageFloat, imageFloat.type());
    GaussianBlur(imageFloat, imageFloat, Size(5, 5), 0, 0);

    // needed matrices for gradients and warped gradients
    Mat gradientX = Mat::zeros(hd, wd, CV_32FC1);
    Mat gradientY = Mat::zeros(hd, wd, CV_32FC1);
    Mat gradientXWarped = Mat(hs, ws, CV_32FC1);
    Mat gradientYWarped = Mat(hs, ws, CV_32FC1);


    // calculate first order image derivatives
    Matx13f dx(-0.5f, 0.0f, 0.5f);

    filter2D(imageFloat, gradientX, -1, dx);
    filter2D(imageFloat, gradientY, -1, dx.t());


    // matrices needed for solving linear equation system for maximizing ECC
    Mat jacobian                = Mat(hs, ws*numberOfParameters, CV_32F);
    Mat hessian                 = Mat(numberOfParameters, numberOfParameters, CV_32F);
    Mat hessianInv              = Mat(numberOfParameters, numberOfParameters, CV_32F);
    Mat imageProjection         = Mat(numberOfParameters, 1, CV_32F);
    Mat templateProjection      = Mat(numberOfParameters, 1, CV_32F);
    Mat imageProjectionHessian  = Mat(numberOfParameters, 1, CV_32F);
    Mat errorProjection         = Mat(numberOfParameters, 1, CV_32F);

    Mat deltaP = Mat(numberOfParameters, 1, CV_32F);//transformation parameter correction
    Mat error = Mat(hs, ws, CV_32F);//error as 2D matrix

    const int imageFlags = CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS+CV_WARP_INVERSE_MAP;
    const int maskFlags  = CV_INTER_NN+CV_WARP_FILL_OUTLIERS+CV_WARP_INVERSE_MAP;


    // iteratively update map_matrix
    double rho      = -1;
    double last_rho = - termination_eps;
    for (int i = 1; (i <= numberOfIterations) && (fabs(rho-last_rho)>= termination_eps); i++)
    {

        // warp-back portion of the inputImage and gradients to the coordinate space of the templateImage
        if (motionType != MOTION_HOMOGRAPHY)
        {
            warpAffine(imageFloat, imageWarped,     map, imageWarped.size(),     imageFlags);
            warpAffine(gradientX,  gradientXWarped, map, gradientXWarped.size(), imageFlags);
            warpAffine(gradientY,  gradientYWarped, map, gradientYWarped.size(), imageFlags);
            warpAffine(allOnes,    imageMask,       map, imageMask.size(),       maskFlags);
        }
        else
        {
            warpPerspective(imageFloat, imageWarped,     map, imageWarped.size(),     imageFlags);
            warpPerspective(gradientX,  gradientXWarped, map, gradientXWarped.size(), imageFlags);
            warpPerspective(gradientY,  gradientYWarped, map, gradientYWarped.size(), imageFlags);
            warpPerspective(allOnes,    imageMask,       map, imageMask.size(),       maskFlags);
        }


        Scalar imgMean, imgStd, tmpMean, tmpStd;
        meanStdDev(imageWarped,   imgMean, imgStd, imageMask);
        meanStdDev(templateFloat, tmpMean, tmpStd, imageMask);

        subtract(imageWarped,   imgMean, imageWarped, imageMask);//zero-mean input
        subtract(templateFloat, tmpMean, templateZM,  imageMask);//zero-mean template

        const double tmpNorm = std::sqrt(countNonZero(imageMask)*(tmpStd.val[0])*(tmpStd.val[0]));
        const double imgNorm = std::sqrt(countNonZero(imageMask)*(imgStd.val[0])*(imgStd.val[0]));

        // calculate jacobian of image wrt parameters
        switch (motionType){
            case MOTION_AFFINE:
                image_jacobian_affine_ECC(gradientXWarped, gradientYWarped, Xgrid, Ygrid, jacobian);
                break;
            case MOTION_HOMOGRAPHY:
                image_jacobian_homo_ECC(gradientXWarped, gradientYWarped, Xgrid, Ygrid, map, jacobian);
                break;
            case MOTION_TRANSLATION:
                image_jacobian_translation_ECC(gradientXWarped, gradientYWarped, jacobian);
                break;
            case MOTION_EUCLIDEAN:
                image_jacobian_euclidean_ECC(gradientXWarped, gradientYWarped, Xgrid, Ygrid, map, jacobian);
                break;
        }

        // calculate Hessian and its inverse
        project_onto_jacobian_ECC(jacobian, jacobian, hessian);

        hessianInv = hessian.inv();

        const double correlation = templateZM.dot(imageWarped);

        // calculate the enhanced correlation coefficient (ECC) -> rho
        last_rho = rho;
        rho = correlation/(imgNorm*tmpNorm);

        // project images into jacobian
        project_onto_jacobian_ECC( jacobian, imageWarped, imageProjection);
        project_onto_jacobian_ECC(jacobian, templateZM, templateProjection);


        // calculate the parameter lambda to account for illumination variation
        imageProjectionHessian = hessianInv*imageProjection;
        const double lambda_n = (imgNorm*imgNorm) - imageProjection.dot(imageProjectionHessian);
        const double lambda_d = correlation - templateProjection.dot(imageProjectionHessian);
        if (lambda_d <= 0.0)
        {
            rho = -1;
            CV_Error(CV_StsNoConv, "The algorithm stopped before its convergence. The correlation is going to be minimized. Images may be uncorrelated or non-overlapped");

        }
        const double lambda = (lambda_n/lambda_d);

        // estimate the update step delta_p
        error = lambda*templateZM - imageWarped;
        project_onto_jacobian_ECC(jacobian, error, errorProjection);
        deltaP = hessianInv * errorProjection;

        // update warping matrix
        update_warping_matrix_ECC( map, deltaP, motionType);


    }

    // return final correlation coefficient
    return rho;
}
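For reference, here is a minimal calling sketch for the function above; the file names, motion model, and termination criteria are illustrative placeholders, not part of the original sample:

// Minimal usage sketch for cv::findTransformECC (OpenCV 3.x-style API).
// "template.png" and "input.png" are placeholder file names.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

int main()
{
    Mat templ = imread("template.png", IMREAD_GRAYSCALE);
    Mat input = imread("input.png", IMREAD_GRAYSCALE);
    if (templ.empty() || input.empty()) return -1;

    // 2x3 identity as the initial guess for an affine warp (CV_32FC1, as asserted above)
    Mat warp = Mat::eye(2, 3, CV_32F);
    TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, 50, 1e-6);

    double rho = findTransformECC(templ, input, warp, MOTION_AFFINE, criteria);
    std::cout << "final ECC value: " << rho << std::endl;

    // warp the input back onto the template grid; WARP_INVERSE_MAP matches the
    // convention used by the warps inside the iteration loop above
    Mat aligned;
    warpAffine(input, aligned, warp, templ.size(), INTER_LINEAR + WARP_INVERSE_MAP);
    return 0;
}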
Ejemplo n.º 15
0
int main(int argc, char** argv)
{
	VideoCapture cap;
	Mat frame;

	frame.create(Size(FRAME_WIDTH, FRAME_HEIGHT), CV_8UC1);

	//if ( frame.isContinuous() ) cout << "yes" << endl;
	//Open RGB Camera
	cap.open(0);
	cap.set(cv::CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
	cap.set(cv::CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);

	if( !cap.isOpened() )
	{
		cout << "Can not open camera !!" << endl;
		return -1;
	}

	//read frame
	cap >> frame;
	if( frame.empty() )
	{
		cout << "Can not read data from the Camera !!" << endl;
		return -1;
	}

	gpu_initialize_gmm(frame.ptr(0));

	cout << "frame.cols: " << frame.cols << endl;
	cout << "frame.rows: " << frame.rows << endl;

	for(;;)
	{
		//Get RGB Image
		cap >> frame;

		if( frame.empty() )
		{
			cout << "Can not read data from the Camera !!" << endl;
			return -1;
		}
		
		//GMM output
		Mat gmm_frame = Mat::zeros(frame.size(), CV_8UC1);
		
		gpu_perform_gmm(frame.ptr(0), gmm_frame.ptr(0));
		//Show the GMM result image
		imshow("GMM", gmm_frame);

		//User Key Input
		char c = waitKey(10);
		if (c == 27) break; // got ESC
	}
	
	gpu_free_gmm();

	return 0;
}
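The loop above offloads the Gaussian-mixture background model to custom GPU kernels (gpu_initialize_gmm / gpu_perform_gmm). For comparison, a sketch of the same idea with OpenCV's built-in CPU subtractor; the history and variance-threshold values below are just the documented defaults:

// Sketch using cv::createBackgroundSubtractorMOG2 (OpenCV 3.x) in place of the
// custom GPU GMM kernels above.
#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap(0);
    if (!cap.isOpened()) return -1;

    // per-pixel mixture-of-Gaussians background model
    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
        cv::createBackgroundSubtractorMOG2(500 /*history*/, 16.0 /*varThreshold*/, false /*detectShadows*/);

    cv::Mat frame, fgMask;
    for (;;) {
        cap >> frame;
        if (frame.empty()) break;

        mog2->apply(frame, fgMask); // 255 = foreground, 0 = background
        cv::imshow("GMM", fgMask);
        if ((char)cv::waitKey(10) == 27) break; // ESC
    }
    return 0;
}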
void CV_StereoMatchingTest::run(int)
{
    string dataPath = ts->get_data_path();
    string algorithmName = name;
    assert( !algorithmName.empty() );
    if( dataPath.empty() )
    {
        ts->printf( cvtest::TS::LOG, "dataPath is empty" );
        ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ARG_CHECK );
        return;
    }

    FileStorage datasetsFS( dataPath + DATASETS_DIR + DATASETS_FILE, FileStorage::READ );
    int code = readDatasetsParams( datasetsFS );
    if( code != cvtest::TS::OK )
    {
        ts->set_failed_test_info( code );
        return;
    }
    FileStorage runParamsFS( dataPath + ALGORITHMS_DIR + algorithmName + RUN_PARAMS_FILE, FileStorage::READ );
    code = readRunParams( runParamsFS );
    if( code != cvtest::TS::OK )
    {
        ts->set_failed_test_info( code );
        return;
    }
    
    string fullResultFilename = dataPath + ALGORITHMS_DIR + algorithmName + RESULT_FILE;
    FileStorage resFS( fullResultFilename, FileStorage::READ );
    bool isWrite = true; // write or compare results
    if( resFS.isOpened() )
        isWrite = false;
    else
    {
        resFS.open( fullResultFilename, FileStorage::WRITE );
        if( !resFS.isOpened() )
        {
            ts->printf( cvtest::TS::LOG, "file %s can not be read or written\n", fullResultFilename.c_str() );
            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ARG_CHECK );
            return;
        }
        resFS << "stereo_matching" << "{";
    }

    int progress = 0, caseCount = (int)caseNames.size();
    for( int ci = 0; ci < caseCount; ci++)
    {
        progress = update_progress( progress, ci, caseCount, 0 );
        printf("progress: %d%%\n", progress);
        fflush(stdout);
        string datasetName = caseDatasets[ci];
        string datasetFullDirName = dataPath + DATASETS_DIR + datasetName + "/";
        Mat leftImg = imread(datasetFullDirName + LEFT_IMG_NAME);
        Mat rightImg = imread(datasetFullDirName + RIGHT_IMG_NAME);
        Mat trueLeftDisp = imread(datasetFullDirName + TRUE_LEFT_DISP_NAME, 0);
        Mat trueRightDisp = imread(datasetFullDirName + TRUE_RIGHT_DISP_NAME, 0);

        if( leftImg.empty() || rightImg.empty() || trueLeftDisp.empty() )
        {
            ts->printf( cvtest::TS::LOG, "images or left ground-truth disparities of dataset %s can not be read", datasetName.c_str() );
            code = cvtest::TS::FAIL_INVALID_TEST_DATA;
            continue;
        }
        int dispScaleFactor = datasetsParams[datasetName].dispScaleFactor;
        Mat tmp; trueLeftDisp.convertTo( tmp, CV_32FC1, 1.f/dispScaleFactor ); trueLeftDisp = tmp; tmp.release();
        if( !trueRightDisp.empty() )
        {
            trueRightDisp.convertTo( tmp, CV_32FC1, 1.f/dispScaleFactor );
            trueRightDisp = tmp; tmp.release();
        }

        Mat leftDisp, rightDisp;
        int ignBorder = max(runStereoMatchingAlgorithm(leftImg, rightImg, leftDisp, rightDisp, ci), EVAL_IGNORE_BORDER);
        leftDisp.convertTo( tmp, CV_32FC1 ); leftDisp = tmp; tmp.release();
        rightDisp.convertTo( tmp, CV_32FC1 ); rightDisp = tmp; tmp.release();

        int tempCode = processStereoMatchingResults( resFS, ci, isWrite,
                   leftImg, rightImg, trueLeftDisp, trueRightDisp, leftDisp, rightDisp, QualityEvalParams(ignBorder));
        code = tempCode==cvtest::TS::OK ? code : tempCode;
    }

    if( isWrite )
        resFS << "}"; // "stereo_matching"

    ts->set_failed_test_info( code );
}
Ejemplo n.º 17
0
    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
    {
        std::vector<Mat> images;
        src.getMatVector(images);
        Mat times = _times.getMat();

        CV_Assert(images.size() == times.total());
        checkImageDimensions(images);
        CV_Assert(images[0].depth() == CV_8U);

        int channels = images[0].channels();
        Size size = images[0].size();
        int CV_32FCC = CV_MAKETYPE(CV_32F, channels);

        dst.create(images[0].size(), CV_32FCC);
        Mat result = dst.getMat();

        Mat response = input_response.getMat();

        if(response.empty()) {
            response = linearResponse(channels);
            response.at<Vec3f>(0) = response.at<Vec3f>(1);
        }
        Mat log_response;
        log(response, log_response); // log into a copy so a caller-supplied response curve is not modified in place
        CV_Assert(log_response.rows == LDR_SIZE && log_response.cols == 1 &&
                  log_response.channels() == channels);

        Mat exp_values(times.clone()); // clone so the in-place log() below does not modify the caller's times
        log(exp_values, exp_values);

        result = Mat::zeros(size, CV_32FCC);
        std::vector<Mat> result_split;
        split(result, result_split);
        Mat weight_sum = Mat::zeros(size, CV_32F);

        for(size_t i = 0; i < images.size(); i++) {
            std::vector<Mat> splitted;
            split(images[i], splitted);

            Mat w = Mat::zeros(size, CV_32F);
            for(int c = 0; c < channels; c++) {
                LUT(splitted[c], weights, splitted[c]);
                w += splitted[c];
            }
            w /= channels;

            Mat response_img;
            LUT(images[i], log_response, response_img);
            split(response_img, splitted);
            for(int c = 0; c < channels; c++) {
                result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
            }
            weight_sum += w;
        }
        weight_sum = 1.0f / weight_sum;
        for(int c = 0; c < channels; c++) {
            result_split[c] = result_split[c].mul(weight_sum);
        }
        merge(result_split, result);
        exp(result, result);
    }
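The process() above implements a Debevec-style log-domain weighted merge. A sketch of the equivalent flow through OpenCV's public photo-module API; the image names and exposure times are placeholders:

// Sketch using OpenCV's public HDR API (photo module), which wraps a process()
// like the one above; file names and exposure times are placeholders.
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    std::vector<cv::Mat> images = {
        cv::imread("exp_1_30.jpg"), cv::imread("exp_1_8.jpg"), cv::imread("exp_1_2.jpg")
    };
    std::vector<float> times = { 1.0f/30, 1.0f/8, 1.0f/2 }; // exposure times in seconds

    // recover the camera response curve, then merge to a 32-bit HDR image
    cv::Mat response, hdr;
    cv::createCalibrateDebevec()->process(images, response, times);
    cv::createMergeDebevec()->process(images, hdr, times, response);

    cv::imwrite("result.hdr", hdr); // Radiance .hdr keeps the float range
    return 0;
}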
Ejemplo n.º 18
0
/**
* @main function
**/
int main(int argc,char const *argv[])
{
    if (argc != 5)
    {
        printf("Invalid argumen!\n");
        printf("-- LightMusic <camera_number> <buffer_length> <low_freq> <hi_freq>\n");
        printf("-- Press Esc to exit\n");
        printf("ex : LightMusic 1 5620 261 1760\n");
        printf("-- <camera_number>  : device number of camera (from 1 to 99)\n");
        printf("-- <buffer_lenght>  : buffer lenght used (from 1000 to 20000)\n");
        printf("-- <low_freq>       : freq of lowest tone, low 261, mid 523, hi 1046\n");
        printf("-- <hi_freq>        : freq of highest tone, low 493, mid 987, hi 1760\n");
        printf("CAUTION!!\n");
        printf("-- bigger number of buffer length, slower frame scan run\n");
        printf("-- smaller number of buffer length, bigger playback sound glitch occur\n");
        printf("-- find right number of buffer length depending on your hardware\n");
        printf("LightMusic -- developed by Lonehack\n");
        return 0;
    }
    int cam = atoi(argv[1]);
    BUFFER_LEN = atoi(argv[2]);
    lotone = atoi(argv[3]);
    hitone = atoi(argv[4]);
	//-- Video prepare
	VideoCapture capture;

	Mat frame;
	time_t start, finish;

	//-- Sound error handling
    if ((err = snd_pcm_open(&handle, device, SND_PCM_STREAM_PLAYBACK, 0)) < 0)
    {
        printf(" --(!) Playback open error: %s  --\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }
    if ((err = snd_pcm_set_params(handle,
                                  SND_PCM_FORMAT_FLOAT,
                                  SND_PCM_ACCESS_RW_INTERLEAVED,
                                  1,
                                  44100,		// sample rate, standard 44100
                                  1,
                                  80200)) < 0)	// latency, standard 2x sample rate
    {
        printf(" --(!) Playback open error: %s --\n", snd_strerror(err));
        exit(EXIT_FAILURE);
    }

	//-- Opening video stream
//	for (int cam=1;cam<100;cam++)
//	{
//        capture.open( cam );	//-- opening input : ( -1 ) any camera or camera number (1,...,99), ( argv[1] ) video file
//    }
    capture.open( cam ); //-- opening input : ( -1 ) any camera or camera number (1,...,99), ( argv[1] ) video file
    //-- set the resolution after opening: property sets on an unopened capture have no effect
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 360);

	//-- Checking interface
	if ( ! capture.isOpened() )
	{
		printf("--(!)Error opening video capture --\n");
		return -1;
	}

	//-- Start the clock
    time(&start);
    int counter=0;

	//-- Read captured
	while ( capture.read(frame) )
	{

		if( frame.empty() )
		{
			printf(" --(!) No captured frame -- Break!\n");
			break;
		}

        //-- fix image resolution
        resize(frame, frame, Size(640, 360), 0, 0, INTER_CUBIC);

		//-- Show original frame
		//namedWindow(window_name_0,CV_WINDOW_NORMAL|CV_WINDOW_KEEPRATIO);
		//imshow( window_name_0,frame );

		//-- flip frame
        flip(frame, frame, -1);

		//-- Apply the lightDetect
		lightDetect(frame);
		//printf("X = %d, Y = %d, Inten = %d \n", PosX, PosY, inten_frame);

		//-- apply sound parameter
		SineWave(PosX, PosY);

		//Stop the clock and show FPS
        time(&finish);
        counter++;
        double sec=difftime(finish,start);
        double fps=counter/sec;
        printf("fps = %lf\n",fps);

		//-- bail out if escape was pressed
		int c = waitKey(10);
		if( (char)c == 27 )
		{
			printf("\nStoped by User\n");
			break;
		}
	}

	//-- Closing program
	snd_pcm_close(handle);
	capture.release();
	return 0;
}
Ejemplo n.º 19
0
int main(int argc, char** argv)
{
	VideoCapture capture;
	char* video = argv[1];
	int flag = arg_parse(argc, argv);
	capture.open(video);

	if(!capture.isOpened()) {
		fprintf(stderr, "Could not initialize capturing..\n");
		return -1;
	}

	int frame_num = 0;
	TrackInfo trackInfo;
	DescInfo hogInfo, hofInfo, mbhInfo;

	InitTrackInfo(&trackInfo, track_length, init_gap);
	InitDescInfo(&hogInfo, 8, false, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&hofInfo, 9, true, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&mbhInfo, 8, false, patch_size, nxy_cell, nt_cell);

	SeqInfo seqInfo;
	InitSeqInfo(&seqInfo, video);

	std::vector<Frame> bb_list;
	if(bb_file) {
		LoadBoundBox(bb_file, bb_list);
		assert(bb_list.size() == seqInfo.length);
	}

	if(flag)
		seqInfo.length = end_frame - start_frame + 1;

//	fprintf(stderr, "video size, length: %d, width: %d, height: %d\n", seqInfo.length, seqInfo.width, seqInfo.height);

	if(show_track == 1)
		namedWindow("DenseTrackStab", 0);

	SurfFeatureDetector detector_surf(200);
	SurfDescriptorExtractor extractor_surf(true, true);

	std::vector<Point2f> prev_pts_flow, pts_flow;
	std::vector<Point2f> prev_pts_surf, pts_surf;
	std::vector<Point2f> prev_pts_all, pts_all;

	std::vector<KeyPoint> prev_kpts_surf, kpts_surf;
	Mat prev_desc_surf, desc_surf;
	Mat flow, human_mask;

	Mat image, prev_grey, grey;

	std::vector<float> fscales(0);
	std::vector<Size> sizes(0);

	std::vector<Mat> prev_grey_pyr(0), grey_pyr(0), flow_pyr(0), flow_warp_pyr(0);
	std::vector<Mat> prev_poly_pyr(0), poly_pyr(0), poly_warp_pyr(0);

	std::vector<std::list<Track> > xyScaleTracks;
	int init_counter = 0; // indicate when to detect new feature points
	while(true) {
		Mat frame;
		int i, j, c;

		// get a new frame
		capture >> frame;
		if(frame.empty())
			break;

		if(frame_num < start_frame || frame_num > end_frame) {
			frame_num++;
			continue;
		}

		if(frame_num == start_frame) {
			image.create(frame.size(), CV_8UC3);
			grey.create(frame.size(), CV_8UC1);
			prev_grey.create(frame.size(), CV_8UC1);

			InitPry(frame, fscales, sizes);

			BuildPry(sizes, CV_8UC1, prev_grey_pyr);
			BuildPry(sizes, CV_8UC1, grey_pyr);
			BuildPry(sizes, CV_32FC2, flow_pyr);
			BuildPry(sizes, CV_32FC2, flow_warp_pyr);

			BuildPry(sizes, CV_32FC(5), prev_poly_pyr);
			BuildPry(sizes, CV_32FC(5), poly_pyr);
			BuildPry(sizes, CV_32FC(5), poly_warp_pyr);

			xyScaleTracks.resize(scale_num);

			frame.copyTo(image);
			cvtColor(image, prev_grey, CV_BGR2GRAY);

			for(int iScale = 0; iScale < scale_num; iScale++) {
				if(iScale == 0)
					prev_grey.copyTo(prev_grey_pyr[0]);
				else
					resize(prev_grey_pyr[iScale-1], prev_grey_pyr[iScale], prev_grey_pyr[iScale].size(), 0, 0, INTER_LINEAR);

				// dense sampling feature points
				std::vector<Point2f> points(0);
				DenseSample(prev_grey_pyr[iScale], points, quality, min_distance);

				// save the feature points
				std::list<Track>& tracks = xyScaleTracks[iScale];
				for(i = 0; i < points.size(); i++)
					tracks.push_back(Track(points[i], trackInfo, hogInfo, hofInfo, mbhInfo));
			}

			// compute polynomial expansion
			my::FarnebackPolyExpPyr(prev_grey, prev_poly_pyr, fscales, 7, 1.5);

			human_mask = Mat::ones(frame.size(), CV_8UC1);
			if(bb_file)
				InitMaskWithBox(human_mask, bb_list[frame_num].BBs);

			detector_surf.detect(prev_grey, prev_kpts_surf, human_mask);
			extractor_surf.compute(prev_grey, prev_kpts_surf, prev_desc_surf);

			frame_num++;
			continue;
		}

		init_counter++;
		frame.copyTo(image);
		cvtColor(image, grey, CV_BGR2GRAY);

		// match surf features
		if(bb_file)
			InitMaskWithBox(human_mask, bb_list[frame_num].BBs);
		detector_surf.detect(grey, kpts_surf, human_mask);
		extractor_surf.compute(grey, kpts_surf, desc_surf);
		ComputeMatch(prev_kpts_surf, kpts_surf, prev_desc_surf, desc_surf, prev_pts_surf, pts_surf);

		// compute optical flow for all scales once
		my::FarnebackPolyExpPyr(grey, poly_pyr, fscales, 7, 1.5);
		my::calcOpticalFlowFarneback(prev_poly_pyr, poly_pyr, flow_pyr, 10, 2);

		MatchFromFlow(prev_grey, flow_pyr[0], prev_pts_flow, pts_flow, human_mask);
		MergeMatch(prev_pts_flow, pts_flow, prev_pts_surf, pts_surf, prev_pts_all, pts_all);

		Mat H = Mat::eye(3, 3, CV_64FC1);
		if(pts_all.size() > 50) {
			std::vector<unsigned char> match_mask;
			Mat temp = findHomography(prev_pts_all, pts_all, RANSAC, 1, match_mask);
			if(countNonZero(Mat(match_mask)) > 25)
				H = temp;
		}

		Mat H_inv = H.inv();
		Mat grey_warp = Mat::zeros(grey.size(), CV_8UC1);
		MyWarpPerspective(prev_grey, grey, grey_warp, H_inv); // warp the second frame

		// compute optical flow for all scales once
		my::FarnebackPolyExpPyr(grey_warp, poly_warp_pyr, fscales, 7, 1.5);
		my::calcOpticalFlowFarneback(prev_poly_pyr, poly_warp_pyr, flow_warp_pyr, 10, 2);

		for(int iScale = 0; iScale < scale_num; iScale++) {
			if(iScale == 0)
				grey.copyTo(grey_pyr[0]);
			else
				resize(grey_pyr[iScale-1], grey_pyr[iScale], grey_pyr[iScale].size(), 0, 0, INTER_LINEAR);

			int width = grey_pyr[iScale].cols;
			int height = grey_pyr[iScale].rows;

			// compute the integral histograms
			DescMat* hogMat = InitDescMat(height+1, width+1, hogInfo.nBins);
			HogComp(prev_grey_pyr[iScale], hogMat->desc, hogInfo);

			DescMat* hofMat = InitDescMat(height+1, width+1, hofInfo.nBins);
			HofComp(flow_warp_pyr[iScale], hofMat->desc, hofInfo);

			DescMat* mbhMatX = InitDescMat(height+1, width+1, mbhInfo.nBins);
			DescMat* mbhMatY = InitDescMat(height+1, width+1, mbhInfo.nBins);
			MbhComp(flow_warp_pyr[iScale], mbhMatX->desc, mbhMatY->desc, mbhInfo);

			// track feature points in each scale separately
			std::list<Track>& tracks = xyScaleTracks[iScale];
			for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end();) {
				int index = iTrack->index;
				Point2f prev_point = iTrack->point[index];
				int x = std::min<int>(std::max<int>(cvRound(prev_point.x), 0), width-1);
				int y = std::min<int>(std::max<int>(cvRound(prev_point.y), 0), height-1);

				Point2f point;
				point.x = prev_point.x + flow_pyr[iScale].ptr<float>(y)[2*x];
				point.y = prev_point.y + flow_pyr[iScale].ptr<float>(y)[2*x+1];
 
				if(point.x <= 0 || point.x >= width || point.y <= 0 || point.y >= height) {
					iTrack = tracks.erase(iTrack);
					continue;
				}

				iTrack->disp[index].x = flow_warp_pyr[iScale].ptr<float>(y)[2*x];
				iTrack->disp[index].y = flow_warp_pyr[iScale].ptr<float>(y)[2*x+1];

				// get the descriptors for the feature point
				RectInfo rect;
				GetRect(prev_point, rect, width, height, hogInfo);
				GetDesc(hogMat, rect, hogInfo, iTrack->hog, index);
				GetDesc(hofMat, rect, hofInfo, iTrack->hof, index);
				GetDesc(mbhMatX, rect, mbhInfo, iTrack->mbhX, index);
				GetDesc(mbhMatY, rect, mbhInfo, iTrack->mbhY, index);
				iTrack->addPoint(point);

				// draw the trajectories at the first scale
				if(show_track == 1 && iScale == 0)
					DrawTrack(iTrack->point, iTrack->index, fscales[iScale], image);

				// if the trajectory achieves the maximal length
				if(iTrack->index >= trackInfo.length) {
					std::vector<Point2f> trajectory(trackInfo.length+1);
					for(int i = 0; i <= trackInfo.length; ++i)
						trajectory[i] = iTrack->point[i]*fscales[iScale];
				
					std::vector<Point2f> displacement(trackInfo.length);
					for (int i = 0; i < trackInfo.length; ++i)
						displacement[i] = iTrack->disp[i]*fscales[iScale];
	
					float mean_x(0), mean_y(0), var_x(0), var_y(0), length(0);
					if(IsValid(trajectory, mean_x, mean_y, var_x, var_y, length) && IsCameraMotion(displacement)) {
						// output the trajectory
						printf("%d\t%f\t%f\t%f\t%f\t%f\t%f\t", frame_num, mean_x, mean_y, var_x, var_y, length, fscales[iScale]);

						// for spatio-temporal pyramid
						printf("%f\t", std::min<float>(std::max<float>(mean_x/float(seqInfo.width), 0), 0.999));
						printf("%f\t", std::min<float>(std::max<float>(mean_y/float(seqInfo.height), 0), 0.999));
						printf("%f\t", std::min<float>(std::max<float>((frame_num - trackInfo.length/2.0 - start_frame)/float(seqInfo.length), 0), 0.999));
					
						// output the trajectory
						for (int i = 0; i < trackInfo.length; ++i)
							printf("%f\t%f\t", displacement[i].x, displacement[i].y);
		
						PrintDesc(iTrack->hog, hogInfo, trackInfo);
						PrintDesc(iTrack->hof, hofInfo, trackInfo);
						PrintDesc(iTrack->mbhX, mbhInfo, trackInfo);
						PrintDesc(iTrack->mbhY, mbhInfo, trackInfo);
						printf("\n");
					}

					iTrack = tracks.erase(iTrack);
					continue;
				}
				++iTrack;
			}
			ReleDescMat(hogMat);
			ReleDescMat(hofMat);
			ReleDescMat(mbhMatX);
			ReleDescMat(mbhMatY);

			if(init_counter != trackInfo.gap)
				continue;

			// detect new feature points every gap frames
			std::vector<Point2f> points(0);
			for(std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); iTrack++)
				points.push_back(iTrack->point[iTrack->index]);

			DenseSample(grey_pyr[iScale], points, quality, min_distance);
			// save the new feature points
			for(i = 0; i < points.size(); i++)
				tracks.push_back(Track(points[i], trackInfo, hogInfo, hofInfo, mbhInfo));
		}

		init_counter = 0;
		grey.copyTo(prev_grey);
		for(i = 0; i < scale_num; i++) {
			grey_pyr[i].copyTo(prev_grey_pyr[i]);
			poly_pyr[i].copyTo(prev_poly_pyr[i]);
		}

		prev_kpts_surf = kpts_surf;
		desc_surf.copyTo(prev_desc_surf);

		frame_num++;

		if( show_track == 1 ) {
			imshow( "DenseTrackStab", image);
			c = cvWaitKey(3);
			if((char)c == 27) break;
		}
	}

	if( show_track == 1 )
		destroyWindow("DenseTrackStab");

	return 0;
}
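The pipeline above computes dense Farneback flow through custom helpers (my::FarnebackPolyExpPyr / my::calcOpticalFlowFarneback) that cache polynomial expansions so the flow can be computed twice per frame pair, raw and camera-motion-compensated. For orientation, a sketch of the stock single-call equivalent, with poly_n = 7 and poly_sigma = 1.5 matching the values used above:

// Sketch of the stock dense Farneback call that the custom helpers above specialize.
#include <opencv2/opencv.hpp>

cv::Mat denseFlow(const cv::Mat& prevGray, const cv::Mat& gray)
{
    cv::Mat flow; // CV_32FC2: per-pixel (dx, dy)
    cv::calcOpticalFlowFarneback(prevGray, gray, flow,
                                 0.5,  // pyramid scale
                                 3,    // pyramid levels
                                 10,   // averaging window size
                                 2,    // iterations per level
                                 7,    // poly_n
                                 1.5,  // poly_sigma
                                 0);   // flags
    return flow;
}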
Ejemplo n.º 20
0
int parseCmdArgs(int argc, char** argv)
{
    if (argc == 1)
    {
        printUsage(argv);
        return -1;
    }

    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
        {
            printUsage(argv);
            return -1;
        }
        else if (string(argv[i]) == "--try_use_gpu")
        {
            if (string(argv[i + 1]) == "no")
                try_use_gpu = false;
            else if (string(argv[i + 1]) == "yes")
                try_use_gpu = true;
            else
            {
                cout << "Bad --try_use_gpu flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--d3")
        {
            divide_images = true;
        }
        else if (string(argv[i]) == "--output")
        {
            result_name = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--mode")
        {
            if (string(argv[i + 1]) == "panorama")
                mode = Stitcher::PANORAMA;
            else if (string(argv[i + 1]) == "scans")
                mode = Stitcher::SCANS;
            else
            {
                cout << "Bad --mode flag value\n";
                return -1;
            }
            i++;
        }
        else
        {
            Mat img = imread(argv[i]);
            if (img.empty())
            {
                cout << "Can't read image '" << argv[i] << "'\n";
                return -1;
            }

            if (divide_images)
            {
                Rect rect(0, 0, img.cols / 2, img.rows);
                imgs.push_back(img(rect).clone());
                rect.x = img.cols / 3;
                imgs.push_back(img(rect).clone());
                rect.x = img.cols / 2;
                imgs.push_back(img(rect).clone());
            }
            else
                imgs.push_back(img);
        }
    }
    return 0;
}
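In the OpenCV stitching sample this parser accompanies, the remaining driver is only a few lines. A sketch, assuming the globals the parser fills above (imgs, mode, try_use_gpu, result_name) and an OpenCV 3.x-style Stitcher::create:

// Sketch of the main() that typically follows parseCmdArgs() in the stitching sample.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/stitching.hpp>
using namespace std;
using namespace cv;

int main(int argc, char* argv[])
{
    int retval = parseCmdArgs(argc, argv);
    if (retval) return retval;

    Mat pano;
    Ptr<Stitcher> stitcher = Stitcher::create(mode, try_use_gpu);
    Stitcher::Status status = stitcher->stitch(imgs, pano);

    if (status != Stitcher::OK)
    {
        cout << "Can't stitch images, error code = " << int(status) << endl;
        return -1;
    }

    imwrite(result_name, pano);
    return 0;
}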
Ejemplo n.º 21
0
int process(VideoCapture& capture) {
    long captureTime;
    cout << "Press q or escape to quit!" << endl;

    CvFont infoFont;
    cvInitFont(&infoFont, CV_FONT_HERSHEY_SIMPLEX, 1, 1);

    namedWindow(VIDEO_WINDOW_NAME, CV_WINDOW_AUTOSIZE);
    namedWindow(ERODE_PREVIEW_WIN_NAME, CV_WINDOW_NORMAL);
    resizeWindow(ERODE_PREVIEW_WIN_NAME, 320, 240);
    ControlsWindow* controlsWindow = new ControlsWindow();

    if(fileExists(preferenceFileName)) {
        loadSettings(controlsWindow, (char*)preferenceFileName);
    }

    Mat frame;
    while (true) {
        capture >> frame;
        captureTime = (int)(getTickCount()/getTickFrequency())*1000;

        if (frame.empty())
            break;

        int target_width = 320;
        int height = (target_width/capture.get(3 /*width*/)) * capture.get(4 /*height*/);
        resize(frame, frame, Size(target_width, height));

        if (controlsWindow->getBlurDeviation() > 0) {
            GaussianBlur(frame, frame, Size(GAUSSIAN_KERNEL, GAUSSIAN_KERNEL), controlsWindow->getBlurDeviation());
        }

        //Apply brightness and contrast
        frame.convertTo(frame, -1, controlsWindow->getContrast(), controlsWindow->getBrightness());

        Mat maskedImage = thresholdImage(controlsWindow, frame);

        Mat erodedImage = erodeDilate(maskedImage, controlsWindow);

        Mat erodedImageBinary;

        cvtColor(erodedImage, erodedImageBinary, COLOR_BGR2GRAY);
        threshold(erodedImageBinary, erodedImageBinary, 0, 255, CV_THRESH_BINARY);

        if(controlsWindow->getInvert()) {
            erodedImageBinary = 255 - erodedImageBinary;
        }

        cv::SimpleBlobDetector::Params params;
        params.minDistBetweenBlobs = 50.0f;
        params.filterByInertia = false;
        params.filterByConvexity = false;
        params.filterByColor = true;
        params.filterByCircularity = false;
        params.filterByArea = true;
        params.minArea = 1000.0f;
        params.maxArea = 100000.0f;
        params.blobColor = 255;

        vector<KeyPoint> centers;
        vector<vector<Point>> contours;
        ModBlobDetector* blobDetector = new ModBlobDetector(params);

        vector<vector<Point>> contourHulls;
        vector<RotatedRect> contourRects;
        blobDetector->findBlobs(erodedImageBinary, erodedImageBinary, centers, contours);
        for(vector<Point> ctpts : contours) {
            vector<Point> hull;
            convexHull(ctpts, hull);
            contourHulls.push_back(hull);
            contourRects.push_back(minAreaRect(hull));
        }
#ifdef DEBUG_BLOBS
        drawContours(frame, contours, -1, Scalar(128,255,128), 2, CV_AA);
        drawContours(frame, contourHulls, -1, Scalar(255, 128,0), 2, CV_AA);
        int ptnum = 0; // initialize: the increment below otherwise reads an indeterminate value
        for(KeyPoint pt : centers) {
            Scalar color(255, 0, 255);
            circle(frame, pt.pt, 5
                   , color, -1 /*filled*/, CV_AA);
            circle(frame, pt.pt, pt.size, color, 1, CV_AA);
            ptnum++;
        }
#endif
        for(RotatedRect rr : contourRects) {
            Point2f points[4];
            rr.points(points);
            float side1 = distance(points[0], points[1]);
            float side2 = distance(points[1], points[2]);

            float shortestSide = min(side1, side2);
            float longestSide = max(side1, side2);
            float aspectRatio = longestSide/shortestSide;
            int b = 0;
            bool isTape = objInfo.aspectRatio == 0 ? false :
                          abs(objInfo.aspectRatio - aspectRatio) < 0.2*objInfo.aspectRatio;
            /*
             * TODO
             * Make a list of possible tape candidates
             * Use tape candidate with smallest difference in ratio to the real ratio as the tape
             */
            if(isTape) {
                b = 255;
                string widthText = "Width (px): ";
                widthText.append(toString(longestSide));
                string heightText = "Height (px): ";
                heightText.append(toString(shortestSide));
                string rotText = "Rotation (deg): ";
                rotText.append(toString(abs((int)rr.angle)));
                string distText;
                if(camSettings.focalLength == -1) {
                    distText = "Focal length not defined";
                } else {
                    float dist = objInfo.width * camSettings.focalLength / longestSide;
                    distText = "Distance (cm): ";
                    distText.append(toString(dist));
                }
                putText(frame, widthText, Point(0, 20), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, heightText, Point(0, 40), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, rotText, Point(0, 60), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
                putText(frame, distText, Point(0, 80), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
            }

            rotated_rect(frame, rr, Scalar(b, 0, 255));
            if(isTape)break;
        }
        if(objInfo.aspectRatio == 0) {
            putText(frame, "Invalid object info (object.xml)", Point(0, 20), CV_FONT_HERSHEY_SIMPLEX, 0.5f, Scalar(0, 255, 255));
        }
        delete blobDetector;

        imshow(ERODE_PREVIEW_WIN_NAME, erodedImageBinary);

        imshow(VIDEO_WINDOW_NAME, frame);

        //int waitTime = max((int)(((1.0/framerate)*1000)
        //                   - ((int)(getTickCount()/getTickFrequency())*1000 - captureTime))
        //                   , 1);
        char key = (char)waitKey(1);
        switch (key) {
        case 'q':
        case 'Q':
        case 27: //escape
            saveSettings(controlsWindow, (char*)preferenceFileName);
            return 0;
        default:
            break;
        }
        std::this_thread::yield();
    }

    saveSettings(controlsWindow, (char*)preferenceFileName);
    delete(controlsWindow);

    destroyAllWindows();
    return 0;
}
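The ModBlobDetector above appears to be a modified blob detector that also exposes contours. For the detection step alone, the stock cv::SimpleBlobDetector with the same Params works; a sketch using the OpenCV 3.x create/detect API:

// Sketch of the equivalent detection step with the stock cv::SimpleBlobDetector;
// note the stock class returns only keypoints, not the contours used above.
#include <opencv2/opencv.hpp>

void detectBlobs(const cv::Mat& binaryImage)
{
    cv::SimpleBlobDetector::Params params;
    params.filterByArea = true;
    params.minArea = 1000.0f;
    params.maxArea = 100000.0f;
    params.filterByColor = true;
    params.blobColor = 255;

    cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
    std::vector<cv::KeyPoint> centers;
    detector->detect(binaryImage, centers); // one keypoint per blob, size ~ blob diameter

    cv::Mat vis;
    cv::drawKeypoints(binaryImage, centers, vis, cv::Scalar(0, 0, 255),
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imshow("blobs", vis);
}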
Ejemplo n.º 22
0
int main(int argc, char **argv) {
    
    //initialize the random number generator
    srand (time(NULL));
    
    VideoCapture cap(0);
    //video shows the current scene
    namedWindow( "video");

    setMouseCallback("video", mouseCallBackFunc, NULL);
    
    
    Mat roiOutputImageLast;
    SpotIt spotIt(&keyboardCallbackRegistry, &mouseCallbackRegistry);
    bool processNextFrame = true;
    bool gracefulExit = false;
    for(; !gracefulExit  ;) {
        //for each frame
        Mat frame;
        Mat roiOutputImage;
        cap >> frame;
        Mat image, tempImage, grayImage;
        if( frame.empty() )
            break;
        frame.copyTo(image);
        
        //first detect the single circle
        cvtColor(frame, tempImage, CV_BGR2GRAY);
        pyrDown(tempImage, grayImage, Size(frame.cols/2, frame.rows/2));
        vector<Vec3f> circles;
        HoughCircles(
                     grayImage,        //input image
                     circles,               //output circles
                     CV_HOUGH_GRADIENT,
                     1,                      //
                     grayImage.rows/8,  //
                     250,                        //upperThresholdForInternalCannyEdgeDetect
                     50,                          //thresholdForCenterDetect
                     40,                          //minRadius
                     80                           //maxRadius,
                     );
        vector<int> numItemsInCluster;
        if(circles.size()>0 && processNextFrame ) {
            double startTime = getTickCount();

            //draw only the first circle
            //todo: this needs to be extended to detect two circular regions
            Point circleCenter(  round(circles[0][0])*2, round(circles[0][1])*2);
            double circleRadius = circles[0][2]*2;
            spotIt.processCircle(
                                 circleCenter, //center of circle detected
                                 circleRadius,  //radius of circle detected
                                 image,  //input BGR image
                                 roiOutputImage//output BGR image of processed region
                                 );
            roiOutputImage.copyTo(roiOutputImageLast);
            double endTime = getTickCount();

            cout << "frameRate: " <<  getTickFrequency()/(endTime - startTime) << endl;
            processNextFrame = false;
        }
        if(!roiOutputImageLast.empty()) {
            Mat dst;
            //Size sz = roiOutputImageLast.size();
            Mat image_reg = image(Rect(0,0,roiOutputImageLast.cols, roiOutputImageLast.rows));
            Size s1 = image_reg.size();
            Size s2 = roiOutputImageLast.size();
            addWeighted(image_reg, 0, roiOutputImageLast, 1, 0, dst);
            dst.copyTo(image(Rect(0,0,roiOutputImageLast.cols, roiOutputImageLast.rows)));
        }
        imshow("video", image);
        
        char c = (char)waitKey(70);
        key_callback(c, roiOutputImageLast, processNextFrame, gracefulExit  );
    }
    cap.release();
    destroyAllWindows();
    return 0;
}
Ejemplo n.º 23
0
bool initTrackingPoints(Mat &queryImg)
{
	Mat H;
	Mat templImg = imread(tmplImgPath);
	if(templImg.empty()) return false;

	vector<KeyPoint> templKeys;
	Mat templDesc;

	vector<KeyPoint> queryKeys;
	Mat queryDesc;


	cv::Ptr<cv::FeatureDetector> detector;
	double keyThresh = 30;
	detector = new BriskFeatureDetector(keyThresh,4);

	cv::Ptr<cv::DescriptorExtractor> descriptorExtractor;
	descriptorExtractor = new BriskDescriptorExtractor();

	Mat templGray;
	cvtColor(templImg, templGray, CV_BGR2GRAY);

	detector->detect(templGray, templKeys);
	descriptorExtractor->compute(templGray, templKeys, templDesc);

	Mat queryGray;
	cvtColor(queryImg, queryGray, CV_BGR2GRAY);

	detector->detect(queryGray, queryKeys);
	descriptorExtractor->compute(queryGray, queryKeys, queryDesc);

	// find matches
	vector<DMatch> _matches;

	cv::Ptr<cv::DescriptorMatcher> descriptorMatcher;
	descriptorMatcher = new cv::BruteForceMatcher<cv::HammingSse>();
	descriptorMatcher->match(queryDesc, templDesc, _matches);

	vector<DMatch> matches;
	for(int i = 0; i < (int)_matches.size(); i++){
		if(_matches[i].distance < 100) matches.push_back(_matches[i]);
	}

	cout << "match keypoint number: " << matches.size() << endl;

	// convert keys to center aligned.
	float scale = 1;
	for(int i = 0; i < (int) templKeys.size(); i++){
		/*templKeys[i].pt.x = (templKeys[i].pt.x-templImg.cols/2)/scale;
		templKeys[i].pt.y = (templImg.rows/2 - templKeys[i].pt.y)/scale;*/
		templKeys[i].pt.x = (templKeys[i].pt.x)/scale;
		templKeys[i].pt.y = (templKeys[i].pt.y)/scale;
	}
	// find homography
	// queryKeys = H*TemplKeys
	vector<DMatch> goodMatches;
	findHomography(queryKeys, templKeys, matches, goodMatches, H, CV_RANSAC, 3);

	cout << "good match keypoint number: " << goodMatches.size() << endl;

	// find good point to track.
	vector<KeyPoint> trackPtCandis;
	for( int i = 0; i < (int) goodMatches.size(); i++ ){
		trackPtCandis.push_back( queryKeys[goodMatches[i].queryIdx] );
	}

	sort(trackPtCandis.begin(), trackPtCandis.end(), KeyPointReponseLarger);

	if( trackPtCandis.size() < MIN_TRACK_PT_NUM ) return false;
	// pick the first 200
	trackPtsPre.clear();
	trackPtsCur.clear();
	for( int i = 0; i < MIN(MAX_TRACK_PT_NUM, trackPtCandis.size()); i++ ){
		trackPtsPre.push_back(trackPtCandis[i].pt);
	}

	return true;

}
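The detector, extractor, and matcher above use the legacy BRISK classes and BruteForceMatcher<HammingSse>. A sketch of the same detect/describe/match pipeline against the current OpenCV API, keeping the same threshold, octave count, and distance cutoff:

// Sketch of the same BRISK pipeline with cv::BRISK + BFMatcher (Hamming distance).
#include <opencv2/opencv.hpp>

void matchBrisk(const cv::Mat& templGray, const cv::Mat& queryGray,
                std::vector<cv::DMatch>& matches)
{
    cv::Ptr<cv::BRISK> brisk = cv::BRISK::create(30 /*thresh*/, 4 /*octaves*/);

    std::vector<cv::KeyPoint> templKeys, queryKeys;
    cv::Mat templDesc, queryDesc;
    brisk->detectAndCompute(templGray, cv::noArray(), templKeys, templDesc);
    brisk->detectAndCompute(queryGray, cv::noArray(), queryKeys, queryDesc);

    // BRISK descriptors are binary, so Hamming distance is the right metric
    cv::BFMatcher matcher(cv::NORM_HAMMING);
    std::vector<cv::DMatch> raw;
    matcher.match(queryDesc, templDesc, raw);

    // keep the same distance cutoff as the code above
    for (const cv::DMatch& m : raw)
        if (m.distance < 100) matches.push_back(m);
}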
int main (int argc, char **argv)
{

	vector<string> arguments = get_arguments(argc, argv);

	// Some initial parameters that can be overridden from the command line	
	vector<string> files, dDirs, outposes, outvideos, outfeatures;
	
	// By default try webcam
	int device = 0;

	// cx and cy aren't always half of dimx/dimy, so they must be overridable (start at 0 and initialize them later if none are specified)
    float fx = 500, fy = 500, cx = 0, cy = 0;
	int dimx = 0, dimy = 0;

	bool useCLMTracker = true;
	
	CLMWrapper::CLMParameters clmParams(arguments);
	
	clmParams.wSizeCurrent = clmParams.wSizeInit;

    PoseDetectorHaar::PoseDetectorHaarParameters haarParams;

	#if OS_UNIX
	haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt.xml";
	#else
	haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt.xml";
	#endif
		
	// Get the input output file parameters
	CLMWrapper::get_video_input_output_params(files, dDirs, outposes, outvideos, outfeatures, arguments);
	// Get camera parameters
	CLMWrapper::get_camera_params(fx, fy, cx, cy, dimx, dimy, arguments);    
	
	// The modules that are being used for tracking
	CLMTracker::TrackerCLM clmModel;	
	
	// Face detector initialisation
	CascadeClassifier classifier(haarParams.ClassifierLocation);
	if(classifier.empty())
	{
		string err = "Could not open a face detector at: " + haarParams.ClassifierLocation;
		FATAL_STREAM( err );
	}

	bool done = false;
	
	int f_n = -1;

	while(!done)
	{
		string file;

		// We might specify multiple video files as arguments
		if(files.size() > 0)
		{
			f_n++;			
		    file = files[f_n];
		}

		bool readDepth = !dDirs.empty();	

		// Do some grabbing
		VideoCapture vCap;
		if( file.size() > 0 )
		{
			INFO_STREAM( "Attempting to read from file: " << file );
			vCap = VideoCapture( file );
		}
		else
		{
			INFO_STREAM( "Attempting to capture from device: " << device );
			vCap = VideoCapture( device );

			// Read a first frame often empty in camera
			Mat img;
			vCap >> img;
		}

		if( !vCap.isOpened() ) FATAL_STREAM( "Failed to open video source" );
		else INFO_STREAM( "Device or file opened");

		Mat img;
		vCap >> img;

		// If no dimensions defined, do not do any resizing
		if(dimx == 0 || dimy == 0)
		{
			dimx = img.cols;
			dimy = img.rows;
		}
	
		// If optical centers are not defined just use center of image
		if(cx == 0 || cy == 0)
		{
			cx = dimx / 2.0f;
			cy = dimy / 2.0f;
		}
	
		// Creating output files
		std::ofstream posesFile;
		if(!outposes.empty())
		{
			posesFile.open (outposes[f_n]);
		}
	
		std::ofstream featuresFile;		
		if(!outfeatures.empty())
		{
			featuresFile.open(outfeatures[f_n]);
		}
	
		int frameProc = 0;

		// faces in a row detected
		int facesInRow = 0;

		// saving the videos
		VideoWriter writerFace;
		if(!outvideos.empty())
		{
			writerFace = VideoWriter(outvideos[f_n], CV_FOURCC('D','I','V','X'), 30, img.size(), true);		
		}

		// Variables useful for the tracking itself
		bool success = false;
		bool trackingInitialised = false;
	
		// For measuring the timings
		int64 t1,t0 = cv::getTickCount();
		double fps = 10;

		Mat disp;

		INFO_STREAM( "Starting tracking");
		while(!img.empty())
		{		

			Mat_<float> depth;
			Mat_<uchar> gray;
			cvtColor(img, gray, CV_BGR2GRAY);
		
			// Don't resize if it's unneeded
			Mat_<uchar> img_scaled;		
			if(dimx != gray.cols || dimy != gray.rows)
			{
				resize( gray, img_scaled, Size( dimx, dimy ) );
				resize(img, disp, Size( dimx, dimy));
			}
			else
			{
				img_scaled = gray;
				disp = img.clone();
			}
		
			namedWindow("colour",1);

			// Get depth image
			if(readDepth)
			{
				char dst[100]; // stack buffer (the original new[]'d this and never freed it)
				std::stringstream sstream;
				//sstream << dDir << "\\depth%06d.png";
				sstream << dDirs[f_n] << "\\depth%05d.png";
				sprintf(dst, sstream.str().c_str(), frameProc + 1);
				Mat_<short> dImg = imread(string(dst), -1);
				if(!dImg.empty())
				{
					if(dimx != dImg.cols || dimy != dImg.rows)
					{
						Mat_<short> dImgT;
						resize(dImg, dImgT, Size( dimx, dimy));
						dImgT.convertTo(depth, CV_32F);
					}
					else
					{
						dImg.convertTo(depth, CV_32F);
					}
				}
				else
				{
					WARN_STREAM( "Can't find depth image" );
				}
			}

			Vec6d poseEstimateHaar;
			Matx66d poseEstimateHaarUncertainty;

			Rect faceRegion;

			// The start place where CLM should start a search (or if it fails, can use the frame detection)
			if(!trackingInitialised || (!success && ( frameProc  % 2 == 0)))
			{
				INFO_STREAM( "Attempting to initialise a face");
				// The tracker can return multiple head pose observation
				vector<Vec6d> poseEstimatesInitialiser;
				vector<Matx66d> covariancesInitialiser;			
				vector<Rect> regionsInitialiser;

				bool initSuccess = PoseDetectorHaar::InitialisePosesHaar(img_scaled, depth, poseEstimatesInitialiser, covariancesInitialiser, regionsInitialiser, classifier, fx, fy, cx, cy, haarParams);
					
				if(initSuccess)
				{
					INFO_STREAM( "Face(s) detected");
					if(poseEstimatesInitialiser.size() > 1)
					{
						cout << "ambiguous detection: " << endl;
						// keep the closest one (this is a hack for the experiment)
						double best = 10000;
						int bestIndex = -1;
						for( size_t i = 0; i < poseEstimatesInitialiser.size(); ++i)
						{
							cout << regionsInitialiser[i].x << " " << regionsInitialiser[i].y <<  " " << regionsInitialiser[i].width << " " <<  regionsInitialiser[i].height << endl;
							if(poseEstimatesInitialiser[i][2] < best  && poseEstimatesInitialiser[i][2] > 100)
							{
								bestIndex = i;
								best = poseEstimatesInitialiser[i][2];
							}									
						}
						if(bestIndex != -1)
						{
							cout << "Choosing bbox:" << regionsInitialiser[bestIndex].x << " " << regionsInitialiser[bestIndex].y <<  " " << regionsInitialiser[bestIndex].width << " " <<  regionsInitialiser[bestIndex].height << endl;
							faceRegion = regionsInitialiser[bestIndex];
						}
						else
						{
							initSuccess = false;
						}
					}
					else
					{	
						faceRegion = regionsInitialiser[0];
					}				
	
					facesInRow++;
				}
			}

			// If condition for tracking is met initialise the trackers
			if(!trackingInitialised && facesInRow >= 1)
			{			
				INFO_STREAM( "Initialising CLM");
				trackingInitialised = CLMWrapper::InitialiseCLM(img_scaled, depth, clmModel, poseEstimateHaar, faceRegion, fx, fy, cx, cy, clmParams);		
				facesInRow = 0;
			}		

			// opencv detector is needed here, if tracking failed reinitialise using it
			if(trackingInitialised)
			{
				success = CLMWrapper::TrackCLM(img_scaled, depth, clmModel, vector<Vec6d>(), vector<Matx66d>(), faceRegion, fx, fy, cx, cy, clmParams);								
			}			
			if(success)
			{			
				clmParams.wSizeCurrent = clmParams.wSizeSmall;
			}
			else
			{
				clmParams.wSizeCurrent = clmParams.wSizeInit;
			}

			// Changes for no reinit version
			//success = true;
			//clmParams.wSizeCurrent = clmParams.wSizeInit;

			Vec6d poseEstimateCLM = CLMWrapper::GetPoseCLM(clmModel, fx, fy, cx, cy, clmParams);

			if(!outfeatures.empty())
			{
				featuresFile << frameProc + 1 << " " << success;
				for (int i = 0; i < 66 * 2; ++i)
				{
					featuresFile << " " << clmModel._shape.at<double>(i);
				}
				featuresFile << endl;
			}

			if(!outposes.empty())
			{
				posesFile << frameProc + 1 << " " << (float)frameProc * 1000/30 << " " << 1 << " " << poseEstimateCLM[0] << " " << poseEstimateCLM[1] << " " << poseEstimateCLM[2] << " " << poseEstimateCLM[3] << " " << poseEstimateCLM[4] << " " << poseEstimateCLM[5] << endl;
			}										
	
			if(success)			
			{
				int idx = clmModel._clm.GetViewIdx(); 	

				// drawing the facial features on the face if tracking is successful
				clmModel._clm._pdm.Draw(disp, clmModel._shape, clmModel._clm._triangulations[idx], clmModel._clm._visi[0][idx]);

				DrawBox(disp, poseEstimateCLM, Scalar(255,0,0), 3, fx, fy, cx, cy);			
			}
			else if(!clmModel._clm._pglobl.empty())
			{			
				int idx = clmModel._clm.GetViewIdx(); 	
			
				// draw the facial features
				clmModel._clm._pdm.Draw(disp, clmModel._shape, clmModel._clm._triangulations[idx], clmModel._clm._visi[0][idx]);

				// if tracking fails draw a red outline
				DrawBox(disp, poseEstimateCLM, Scalar(0,0,255), 3, fx, fy, cx, cy);	
			}
			if(frameProc % 10 == 0)
			{      
				t1 = cv::getTickCount();
				fps = 10.0 / (double(t1-t0)/cv::getTickFrequency()); 
				t0 = t1;
			}

			char fpsC[255];
			sprintf(fpsC, "%d", (int)fps);
			string fpsSt("FPS:");
			fpsSt += fpsC;
			cv::putText(disp, fpsSt, cv::Point(10,20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255,0,0));
		
			frameProc++;
						
			imshow("colour", disp);
			if(!depth.empty())
			{
				imshow("depth", depth/2000.0);
			}

			vCap >> img;
		
			if(!outvideos.empty())
			{		
				writerFace << disp;
			}
		
			// detect key presses
			char c = cv::waitKey(2);

			// key detections

			// restart the tracker
			if(c == 'r')
			{
				trackingInitialised = false;
				facesInRow = 0;
			}
			// quit the application
			else if(c=='q')
			{
				return(0);
			}


		}
		
		trackingInitialised = false;
		facesInRow = 0;

		posesFile.close();

		// break out of the loop if done with all the files
		if(f_n == files.size() -1)
		{
			done = true;
		}
	}

	return 0;
}
Ejemplo n.º 25
0
int main(int argc, const char* argv[])
{
    const char* keys =
       "{ l | left      |       | left image file name }"
       "{ r | right     |       | right image file name }"
       "{ i | intrinsic |       | intrinsic camera parameters file name }"
       "{ e | extrinsic |       | extrinsic camera parameters file name }"
       "{ d | ndisp     | 256   | number of disparities }"
       "{ s | scale     | 1.0   | scale factor for point cloud }"
       "{ h | help      | false | print help message }";

    CommandLineParser cmd(argc, argv, keys);

    if (cmd.get<bool>("help"))
    {
        cout << "Avaible options:" << endl;
        cmd.printParams();
        return 0;
    }

    string left = cmd.get<string>("left");
    string right = cmd.get<string>("right");
    string intrinsic = cmd.get<string>("intrinsic");
    string extrinsic = cmd.get<string>("extrinsic");
    int ndisp = cmd.get<int>("ndisp");
    double scale = cmd.get<double>("scale");

    if (left.empty() || right.empty())
    {
        cout << "Missed input images" << endl;
        cout << "Avaible options:" << endl;
        cmd.printParams();
        return 0;
    }

    if (intrinsic.empty() ^ extrinsic.empty())
    {
        cout << "Boss camera parameters must be specified" << endl;
        cout << "Avaible options:" << endl;
        cmd.printParams();
        return 0;
    }

    Mat imgLeftColor = imread(left, IMREAD_COLOR);
    Mat imgRightColor = imread(right, IMREAD_COLOR);

    if (imgLeftColor.empty())
    {
        cout << "Can't load image " << left << endl;
        return -1;
    }

    if (imgRightColor.empty())
    {
        cout << "Can't load image " << right << endl;
        return -1;
    }

    Mat Q = Mat::eye(4, 4, CV_32F);
    if (!intrinsic.empty() && !extrinsic.empty())
    {
        FileStorage fs;

        // reading intrinsic parameters
        fs.open(intrinsic, CV_STORAGE_READ);
        if (!fs.isOpened())
        {
            cout << "Failed to open file " << intrinsic << endl;
            return -1;
        }

        Mat M1, D1, M2, D2;
        fs["M1"] >> M1;
        fs["D1"] >> D1;
        fs["M2"] >> M2;
        fs["D2"] >> D2;

        // reading extrinsic parameters
        fs.open(extrinsic, CV_STORAGE_READ);
        if (!fs.isOpened())
        {
            cout << "Failed to open file " << extrinsic << endl;
            return -1;
        }

        Mat R, T, R1, P1, R2, P2;
        fs["R"] >> R;
        fs["T"] >> T;

        Size img_size = imgLeftColor.size();

        Rect roi1, roi2;
        stereoRectify(M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2);

        Mat map11, map12, map21, map22;
        initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
        initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);

        Mat img1r, img2r;
        remap(imgLeftColor, img1r, map11, map12, INTER_LINEAR);
        remap(imgRightColor, img2r, map21, map22, INTER_LINEAR);

        imgLeftColor = img1r(roi1);
        imgRightColor = img2r(roi2);
    }
Ejemplo n.º 26
0
/*
*	Overview:
*		Main loop for map building
*		Quit with 'q'
*		Images are not saved unless the loop exits cleanly
*	Arguments:
*		int URG_COM[]	array of COM ports of the connected URGs
*		int ARDUINO_COM	COM port of the connected Arduino

*	Return value:
*		none
*/
void getDataUNKOOrigin(int URG_COM[], float URGPOS[][4], int ARDUINO_COM, int NumOfURG)
{
	/**********************
	 * Variable declarations
	 **********************/

	HANDLE handle_ARDUINO;	// handle for the Arduino

	urg_mapping *unkoArray = new urg_mapping[NumOfURG];	// array of urg_mapping objects

	Timer	timer; // timer for pacing the loop and measuring intervals
	int		interval;
	timer.Start();

	float currentCoord[2] = {};	// current position relative to the measurement start point

	float chairdist = 0.0;	// wheelchair travel distance
	float chairdist_old = 0.0;

	float dist = 0.0;	// accumulated travel distance
	float rad = 0.0;	// accumulated rotation

	// variables for the numeric readouts
	string meterName[] = {"dataL","dataR", "Difference of encoder value(L-R)", "Ratio of encoder value(L/R[%])", 
							"Current coordinates X", "Current coordinates Y", "Moving distance[mm]", "Angle variation[deg]",
							"Interval[millisec]"};
	float		meterData[9] = {};

	// bridge to csForm
	// used to exchange things like when to leave the loop
	SharedMemory<int> shMemInt("MappingFormInt");
	enum {ISEND , INTERVALTIME};
	shMemInt.setShMemData(false, ISEND);

	/****************************
	* Miscellaneous initialization
	*****************************/

	// load the arrow image used to display orientation
	arrowpic = imread("../../res/img/arrow.jpg");
	if (arrowpic.empty()) arrowpic = imread("../../../res/img/arrow.jpg");
	if (arrowpic.empty()) cout << "No arrow image" << endl;
	arrowpic = ~arrowpic;
	//rcvDroid.getOrientationData(defaultOrientation);

	// get a handle for serial communication with the Arduino
	getArduinoHandle(ARDUINO_COM,handle_ARDUINO);
	// initialize the encoders
	Encoder(handle_ARDUINO, dist, rad);

	PCImage::isColor = false;
	PCImage::BGR color[2] = { PCImage::B, PCImage::G };
	
	urg_mapping::initPCImage(imgWidth,imgHeight,imgResolution);
	urg_mapping::setPCImageOrigin(imgWidth / 2, imgHeight / 2);

	// initialize one urg_mapping object per connected URG
	for (int i = 0; i < NumOfURG; i++)
	{
		unkoArray[i].init(URG_COM[i], URGPOS[i]);
		unkoArray[i].setWriteLine(false);
		unkoArray[i].setPCDDir();
	}

	/*********************
	* Main processing
	**********************/

	// map-building loop
	// press 'q' to exit
#ifndef KAISUU
	while (true){
#else
	for (int i = 0; i < KAISUU; i++){
#endif
		// leave the specified interval between iterations
		if (timer.getLapTime(1, Timer::millisec, false) < shMemInt.getShMemData(INTERVALTIME)) continue;
		interval = timer.getLapTime();

		// read travel distance and rotation from the encoders
		Encoder(handle_ARDUINO, dist, rad);

		// store the accumulated distance
		chairdist = dist;

		// read each URG and, using the encoder values, build the map and pcd files
		for (int i = 0; i < NumOfURG; i++)
		{
			unkoArray[i].updateCurrentCoord(currentCoord);
			unkoArray[i].setPCImageColor(color[i]);
			unkoArray[i].writeMap(dist,chairdist_old, rad);
			unkoArray[i].saveRawPCD(dist,rad);
		}
		
		// update the current position
		// relative to the measurement start point:
		//		+x: forward
		//		+y: left
		currentCoord[0] += cos(rad) * (chairdist - chairdist_old);
		currentCoord[1] -= sin(rad) * (chairdist - chairdist_old);

		// save the current travel amount
		chairdist_old = chairdist;

		// leave the loop when 'q' is pressed,
		// or when address 0 of the shared memory is set
		if (cv::waitKey(1) == 'q' || shMemInt.getShMemData(0)) break;

		// configure the meter display
		{
			meterData[0] = data_L;
			meterData[1] = data_R;
			meterData[2] = data_L - data_R;
			if (data_R)	meterData[3] = (float)data_L / (float)data_R * 100;
			else meterData[3] = 0;
			meterData[4] = currentCoord[0];
			meterData[5] = currentCoord[1];
			meterData[6] = dist;
			meterData[7] = rad / PI *180;
			meterData[8] = interval;

			meter(picture, meterData, meterName, 9);
			showDirection( -rad , ":Encoder" );
		}

	}

	// free the array allocated with new
	//delete[] unkoArray;
	// close the displayed windows
	destroyAllWindows();
	// release the Arduino handle
	CommClose(handle_ARDUINO);

	return;
}
Ejemplo n.º 27
0
int main( int argc, char** argv )
{
    String dataDir="/Users/donj/workspace/cs585/Lab7/Data/";
        
    //variables for capturing from the camera or using file input
    VideoCapture cap;
    char directory[256], filename[256];
    int startFrame(-1), endFrame (-1);
    int currentFrame = 1;
    int frameNumber=0;
    bool bRecording = false;
    addRemovePt = false;

    if( argc == 1 )
    {
        //with no arguments, use the camera
        cap.open(0);
        if( !cap.isOpened() )
        {
            cout << "Could not initialize capturing...\n";
            return 0;
        }
    }
    else if (argc == 4)
    {
        //otherwise, use a sequence of images (as in HW3)
        //You can collect an image sequence using the code from Lab3_Part3
        strcpy(directory, argv[1]);
        startFrame = atoi(argv[2]);
        endFrame = atoi(argv[3]);
        currentFrame = startFrame;
    }
    else
    {
        cout<<"Wrong number of arguments"<<endl;
        return 0;
    }

    namedWindow( "Gray", 1 );
    namedWindow( "Template Tracking", 1 );

    //The mouse callback will allow us to initialize a template in the window
    setMouseCallback( "Template Tracking", onMouse, 0 );

    Mat gray, displayGray, image;
    vector<Point2f> points; //for the locations of the templates
    vector<Mat> patches; //for the image patches

    addRemovePt = false; //this is so the mouse callback event can tell us when to add points

    for(;;)
    {
        currentFrame++;

        //if we're using an image sequence and we're at the end, stop
        if(endFrame > 0 && currentFrame > endFrame)
        {
            break;
        }

        Mat frame;
        if(cap.isOpened())
        {
            //Read from the camera
            cap >> frame;
            cap.set(CV_CAP_PROP_FPS, 0.1);
            if( frame.empty() )
                break;
            frame.copyTo(image);
            imageSize=image.size();
        }
        else
        {
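            // --- The listing is truncated here. A sketch of the usual
            // --- sequence-reading branch, with a hypothetical "frame%d.jpg"
            // --- naming scheme that is NOT confirmed by the original:
            //
            //     sprintf(filename, "%s/frame%d.jpg", directory, currentFrame);
            //     frame = imread(filename);
            //     if (frame.empty()) break;
            //     frame.copyTo(image);
            //     imageSize = image.size();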
Ejemplo n.º 28
0
int main( int argc, char** argv)
{
    std::string arg = argv[1];
    VideoCapture capture(arg); //try to open string, this will attempt to open it as a video file
    if (!capture.isOpened()) //if this fails, try to open as a video camera, through the use of an integer param
        capture.open(atoi(arg.c_str()));
    if (!capture.isOpened()) {
        cerr << "Failed to open a video device or video file!\n" << endl;
      //  help(av);
        return 1;
    }
  //  Mat img = Mat::zeros(w, w, CV_8UC1);
//    if(argc > 1)
 //   {
  //      help();
   //     return -1;
    //}
 //   const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
  //  Ptr<IplImage> iplimg = cvLoadImage(imagename);
   // Mat imgin(iplimg);

    Mat imgin;
    Mat img;
    namedWindow( "image", CV_WINDOW_KEEPRATIO );
    std::vector<KeyPoint> keypoints_n;
    
    Mat matArray[100];
    for(int i = 0; i < 100; i++)
    {
	    Mat imgin;
	    capture.read(imgin);
	    
	    if (imgin.empty())
                break;
            matArray[i] = imgin;
	//     imshow("image", imgin);
	//     char key = (char)waitKey(5);
    }

    for(int i = 0; i < 100; i++)
    {
	    Mat imgin;
            imgin = matArray[i];
	    //show the faces
	    imgin.convertTo(img, CV_8U);
	    cvtColor(img, img, CV_BGR2GRAY); // capture frames are 3-channel BGR; CV_BGRA2GRAY would assert here
	    threshold( img, img, 10, 255, THRESH_BINARY_INV );
    
	    
	 //   imshow( "image", img );

	    int minHessian = 2000;  // SURF Hessian threshold
	    SurfFeatureDetector detector( minHessian );

	    std::vector<KeyPoint> keypoints_1;
	   // std::vector<KeyPoint> keypoints_n;
	     detector.detect( img, keypoints_1 );

	     //-- Draw keypoints
	     Mat img_keypoints_1;
	     float x[5];
	     float y[5];
	     int t[5];

	     for(int i = 0; i < 5; i++)
	     {
		    x[i] = 0; // accumulators must start at 0 so x[i]/t[i] is the exact cluster mean
		    y[i] = 0;
		    t[i] = 0;
	     }
    
	     int j = 0;
	     for(int i = 0; i < keypoints_1.size(); i++)
	     {
	     	char found = 0;
		    float kx = keypoints_1[i].pt.x;
		    float ky = keypoints_1[i].pt.y;
		    j = 0;
		    while(!found && j < 5)
		    {
		        if(t[j] == 0)
		        {
		            found = 1;
		            x[j] += kx;
		            y[j] += ky;
		            t[j]++;
		        }
		        else if(kx > (x[j] / t[j]) - 50 && kx < (x[j] / t[j]) + 50 && ky > (y[j] / t[j]) - 50 && ky < (y[j] / t[j]) + 50)
		        {
		            found = 1;
		            x[j] += kx;
		            y[j] += ky;
		            t[j]++;
		        }
		        j++;
		    }
	     }

	     for(int i = 0; i < 5; i++)
	     {
		    if(t[i] != 0) // only bins that collected keypoints; t[i] == 0 would divide by zero
		    {
			    cout << x[i] / t[i] << endl;
			    keypoints_n.push_back(KeyPoint(x[i] / t[i], y[i] / t[i], 1));
		    }
	     }
	     drawKeypoints( imgin, keypoints_n, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

	     //-- Show detected (drawn) keypoints
	     imshow("image", img_keypoints_1 );
	     char key = (char)waitKey(5);
	}
    //Extract the contours (left disabled below)
 //   vector<vector<Point> > contours0;
   // findContours( img, contours0, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
	
   // cout << contours0.size() << endl;
   // contours.resize(contours0.size());
   // for( size_t k = 0; k < contours0.size(); k++ )
    //    approxPolyDP(Mat(contours0[k]), contours[k], 3, true);

   // namedWindow( "contours", WINDOW_NORMAL );
  //  createTrackbar( "levels+3", "contours", &levels, 7, on_trackbar );

   // on_trackbar(0,0);
    waitKey();

    return 0;
}
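The keypoint loop in this example is a greedy running-average clustering: each keypoint joins the first of five bins whose current mean lies within ±50 px on both axes, or opens an empty bin. The same idea as a self-contained helper (clusterKeypoints and its parameters are illustrative names, not part of the original):

#include <cmath>
#include <vector>
#include <opencv2/features2d/features2d.hpp>

// Greedily merge keypoints into at most maxBins clusters of box radius `radius`,
// returning one KeyPoint per non-empty bin, placed at that bin's mean position.
std::vector<cv::KeyPoint> clusterKeypoints(const std::vector<cv::KeyPoint>& kps,
                                           int maxBins = 5, float radius = 50.f)
{
    std::vector<float> sx(maxBins, 0.f), sy(maxBins, 0.f);
    std::vector<int> n(maxBins, 0);
    for (size_t i = 0; i < kps.size(); i++) {
        float kx = kps[i].pt.x, ky = kps[i].pt.y;
        for (int j = 0; j < maxBins; j++) {
            // take an empty bin, or a bin whose running mean is close enough
            if (n[j] == 0 || (std::fabs(kx - sx[j] / n[j]) < radius &&
                              std::fabs(ky - sy[j] / n[j]) < radius)) {
                sx[j] += kx; sy[j] += ky; n[j]++;
                break;
            }
        }
    }
    std::vector<cv::KeyPoint> clusters;
    for (int j = 0; j < maxBins; j++)
        if (n[j] > 0)
            clusters.push_back(cv::KeyPoint(sx[j] / n[j], sy[j] / n[j], 1.f));
    return clusters;
}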
Ejemplo n.º 29
0
void Tracker::track(int nsamples, double dynamicp)
{
	Mat frame;
	Mat hsvFrame;
	bool finishInnerLoop = false;
	Particles pf(nsamples, dynamicp);
	bool wasInit = false;
  
	namedWindow("fr", CV_WINDOW_KEEPRATIO);
	createTrackbar("kapa", "fr", &(pf.measure_param), 1000, NULL);
	setMouseCallback("fr", wrappedOnMouse, (void*)this);

	do{
		(*capture) >> frame;

		if(!frame.empty()){

			if(wasInit){
				cvtColor(frame, hsvFrame , CV_RGB2HSV);
				pf.resample();
				pf.predict();
				pf.measure(hsvFrame);
				pf.new_state(hsvFrame);

				for(int i=0 ; i<pf.pnum ; i++) {
					circle(frame, Point(pf.particles[i].pos_x, pf.particles[i].pos_y), 5, 
					       Scalar(0,0,255));
					circle(frame, Point((int)pf.mean_pos_x, (int)pf.mean_pos_y), 5, 
						   Scalar(255,0,0), -1);
					// rectangle(frame, Point(pf.particles[i].pos_x + (pf.width>>1), pf.particles[i].pos_y + (pf.height>>1)),
					//           Point(pf.particles[i].pos_x - (pf.width>>1), pf.particles[i].pos_y - (pf.height>>1)),
					//           Scalar(0,255,0));
				}
			}

			imshow("fr", frame);

			finishInnerLoop = false;
			switch(waitKey(2) & 255){
				case 't': // pause playback and let the user mark the object with the mouse

					filling = false;
					filled = false;
						
					while(!finishInnerLoop){
						Mat frameCopy = frame.clone();

						// draw the selection rectangle while the mouse is being dragged
						if(filling)
							rectangle(frameCopy, Point(px1, py1), Point(px2, py2), Scalar(255), 2);

						if(filled){
							filling = false;
							filled = false;
						}

						imshow("fr", frameCopy);

						switch(waitKey(2) & 255){
							case 't':
							case ' ':
								finishInnerLoop = true;
								Rect rct(Point(px1,py1), Point(px2,py2));
								if(rct.width <= 0 || rct.height <= 0)
									break;
								cvtColor(frame, hsvFrame , CV_RGB2HSV);
								pf.init_samples(hsvFrame, rct);
								wasInit = true;
								break;
						}
					}
					break;
			}

			writer->write(frame);
		}
	} while( !frame.empty() );
} 
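Tracker::track drives a standard SIR (sampling importance resampling) cycle: resample by weight, predict with the dynamic model, weight each sample against the HSV measurement, then take the weighted mean as the state estimate. The Particles internals are not shown above, so here is one common way the resample stage is implemented, systematic resampling, as a sketch with hypothetical types (Particle, resample):

#include <cstdlib>
#include <vector>

struct Particle { float x, y; double w; }; // hypothetical particle with weight w

// Systematic resampling: draw ps.size() particles proportionally to their
// weights using one random offset and evenly spaced pointers. A sketch of
// what a stage like pf.resample() typically does; not the original's code.
std::vector<Particle> resample(const std::vector<Particle>& ps)
{
    std::vector<Particle> out;
    size_t n = ps.size();
    if (n == 0) return out;

    std::vector<double> cdf(n);
    double sum = 0.0;
    for (size_t i = 0; i < n; i++) { sum += ps[i].w; cdf[i] = sum; }

    out.reserve(n);
    double step = sum / n;
    double u = (std::rand() / (double)RAND_MAX) * step; // single random offset
    size_t j = 0;
    for (size_t i = 0; i < n; i++, u += step) {
        while (j < n - 1 && cdf[j] < u) j++;   // advance to the selected particle
        Particle p = ps[j];
        p.w = 1.0 / n;                         // weights reset after resampling
        out.push_back(p);
    }
    return out;
}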
Ejemplo n.º 30
0
void loadTODLikeBase(const string& dirname, vector<Mat>& bgrImages, vector<Mat>& depthes32F, vector<string>* imageFilenames)
{
    CV_Assert(!dirname.empty());

    vector<string> allFilenames;
    readDirectory(dirname, allFilenames, false);

    vector<string> imageIndices;
    imageIndices.reserve(allFilenames.size());
    for(size_t i = 0; i < allFilenames.size(); i++)
    {
        const string& imageFilename = allFilenames[i];
        // "image_<index>.png" is at least 11 characters long
        if (imageFilename.size() < 11)
          continue;

        const string imageIndex = imageFilename.substr(6, imageFilename.length() - 6 - 4);

        if(imageFilename.substr(0, 6) == "image_" &&
           imageIndex.find_first_not_of("0123456789") == std::string::npos &&
           imageFilename.substr(imageFilename.length() - 4, 4) == ".png")
        {
            imageIndices.push_back(imageIndex);
        }
    }

    bgrImages.resize(imageIndices.size());
    depthes32F.resize(imageIndices.size());
    if(imageFilenames)
        imageFilenames->resize(imageIndices.size());

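// NOTE: an unsigned loop counter (size_t) in "omp parallel for" requires
// OpenMP 3.0+; on OpenMP 2.0 compilers (e.g. MSVC) use a signed int index.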
#pragma omp parallel for
    for(size_t i = 0; i < imageIndices.size(); i++)
    {
        string imageFilename = "image_" + imageIndices[i] + ".png";
        cout << "Load " << imageFilename << endl;

        if(imageFilenames)
            (*imageFilenames)[i] = imageFilename;

        // read image
        {
            string imagePath = dirname + "/" + imageFilename;
            Mat image = imread(imagePath);
            CV_Assert(!image.empty());
            bgrImages[i] = image;
        }

        // read depth
        {
            Mat depth;
            string depthPath = "depth_image_" + imageIndices[i] + ".xml.gz";
            FileStorage fs(dirname + "/" + depthPath, FileStorage::READ);
            if(fs.isOpened())
            {
                fs["depth_image"] >> depth;
            }
            else
            {
                depthPath = "depth_" + imageIndices[i] + ".png";
                depth = imread(dirname + "/" + depthPath, -1);
                CV_Assert(!depth.empty());
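                // assumption: the 16-bit depth PNG stores millimeters (the usual
                // Kinect/TOD convention); scaling by 0.001 gives meters, and
                // zero-valued pixels are flagged as NaN (missing depth) below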
                Mat depth_flt;
                depth.convertTo(depth_flt, CV_32FC1, 0.001);
                depth_flt.setTo(std::numeric_limits<float>::quiet_NaN(), depth == 0);
                depth = depth_flt;
            }
#if 0
            cout << "Bilateral iltering" << endl;
            fs["depth_image"] >> depth;

            const double depth_sigma = 0.003;
            const double space_sigma = 3.5;  // in pixels
            Mat invalidDepthMask = (depth != depth) | (depth == 0.);
            depth.setTo(-5*depth_sigma, invalidDepthMask);
            Mat filteredDepth;
            bilateralFilter(depth, filteredDepth, -1, depth_sigma, space_sigma);
            filteredDepth.setTo(std::numeric_limits<float>::quiet_NaN(), invalidDepthMask);
            depth = filteredDepth;
#endif
            CV_Assert(!depth.empty());
            CV_Assert(depth.type() == CV_32FC1);
            depthes32F[i] = depth;
        }