Example no. 1
int ComputeHomographyFromPointCorrespondanceOpenCV ( struct FeatureList * source,
                                                     struct CameraCalibrationData * calibration,
                                                     struct TransformationMatrix * rotation_matrix,
                                                     struct TransformationMatrix * translation_matrix,
                                                     struct TransformationMatrix * rotation_and_translation_matrix,
                                                     struct TransformationMatrix * homography_matrix ,
                                                     unsigned int render_warped_image
                                                     )
{
   if ( source->current_features < 4 ) { return 0; } // a homography needs at least 4 point correspondences

   int i=0 , res = 1;
   unsigned int points_limit = source->current_features;
   //points_limit  = 4; // If we want just a perspective Transform and not a Homography

   CvMat* srcPoints = cvCreateMat(2,points_limit,CV_32FC1);
   if ( srcPoints != 0 )
   {
    for ( i=0; i<points_limit;  i++ )
     {   cvmSet(srcPoints,0,i,(float) source->list[i].last_x);
         cvmSet(srcPoints,1,i,(float) source->list[i].last_y);
     }
   }

   CvMat* dstPoints = cvCreateMat(2,points_limit,CV_32FC1);
   if ( dstPoints != 0 )
   {
    for ( i=0; i<points_limit; i++ )
     {   cvmSet(dstPoints,0,i,(float) source->list[i].x);
         cvmSet(dstPoints,1,i,(float) source->list[i].y);
     }
   }


   CvMat* status=0;
   CvMat* H =  cvCreateMat(3,3,CV_64F);
   cvZero(H);

    res = cvFindHomography(srcPoints,dstPoints,H,CV_RANSAC,2.5,status);
    //cvPerspectiveTransform(srcPoints,dstPoints, H);


   i=0;
   int mem=0,j=0;
   homography_matrix->rows=3;
   homography_matrix->columns=3;
    for(i=0; i<3; i++)
     { for(j=0; j<3; j++)
       {
         homography_matrix->item[mem++]=cvmGet(H,i,j);
       }
     }


   //  THIS OVERLAYS WARPED IMAGE OF LAST VIEW
 if (render_warped_image)
 {
   IplImage  * image = cvCreateImage( cvSize(320,240), IPL_DEPTH_8U, 3 );
   memcpy(image->imageData , video_register[CALIBRATED_LEFT_EYE].pixels , metrics[RESOLUTION_MEMORY_LIMIT_3BYTE]);
   IplImage  * dstImg = cvCloneImage(image);


   cvWarpPerspective(image, dstImg, H , CV_INTER_CUBIC | CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );
   memcpy( video_register[LAST_RIGHT_OPERATION].pixels , dstImg->imageData , metrics[RESOLUTION_MEMORY_LIMIT_3BYTE]);
   video_register[LAST_RIGHT_OPERATION].time = video_register[CALIBRATED_LEFT_EYE].time;
   cvReleaseImage( &image );
   cvReleaseImage( &dstImg );
 }


   // transformed output image
   CvMat*  intriMat=cvCreateMat(3,3,CV_64F); //cvMat(3,3,CV_64F,calibration->intrinsic_parameters_array);
           cvmSet(intriMat,0,0,calibration->intrinsic_parameters_array[0]); cvmSet(intriMat,0,1,calibration->intrinsic_parameters_array[1]); cvmSet(intriMat,0,2,calibration->intrinsic_parameters_array[2]);
           cvmSet(intriMat,1,0,calibration->intrinsic_parameters_array[3]); cvmSet(intriMat,1,1,calibration->intrinsic_parameters_array[4]); cvmSet(intriMat,1,2,calibration->intrinsic_parameters_array[5]);
           cvmSet(intriMat,2,0,calibration->intrinsic_parameters_array[6]); cvmSet(intriMat,2,1,calibration->intrinsic_parameters_array[7]); cvmSet(intriMat,2,2,calibration->intrinsic_parameters_array[8]);



   CvMat*  homography_decomposition_to_translation_and_rotation = CreateHomographyRotationTranslationMatrix(H,intriMat);


   if ( homography_decomposition_to_translation_and_rotation != 0 )
   {
      ConvertMatrices( rotation_matrix,translation_matrix,rotation_and_translation_matrix , homography_decomposition_to_translation_and_rotation );
   }

   if ( srcPoints != 0 ) { cvReleaseMat(&srcPoints); }
   if ( dstPoints != 0 ) { cvReleaseMat(&dstPoints); }
   if ( H != 0 ) { cvReleaseMat(&H); }
   if ( homography_decomposition_to_translation_and_rotation != 0 ) { cvReleaseMat(&homography_decomposition_to_translation_and_rotation); }
   if ( intriMat != 0 ) { cvReleaseMat(&intriMat); }

   return res;
}
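A minimal calling sketch for the function above. The struct fields used here (current_features, list[].x/y/last_x/last_y, intrinsic_parameters_array, item[]) are taken from the function body; everything else, including the assumption that TransformationMatrix.item has room for 9 entries, is hypothetical:

// Hypothetical caller: after the feature tracker has filled `features`,
// recover the homography between the previous and current feature positions.
struct FeatureList features;          // assumed populated by the tracker
struct CameraCalibrationData calib;   // assumed to hold a 3x3 row-major intrinsic matrix
struct TransformationMatrix rot, trans, rot_trans, homo;

if ( ComputeHomographyFromPointCorrespondanceOpenCV(&features, &calib,
                                                    &rot, &trans, &rot_trans,
                                                    &homo, /*render_warped_image=*/0) )
{
    // homo.item[0..8] now holds the 3x3 homography in row-major order
}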
Example no. 2
//==============================================================================
//Binary image median filtering with a (2ts+1)^2 mask
void Cdlbk::Connect_Filter(int mWid, int mHei, unsigned char* pInput)
{	
	
	if (FT)
	{
		float flg_f=float(0.3);
		int ts = (int)(sqrt((float)FILTER_SIZE));
		unsigned int mask=0;
		unsigned int tem_sum=0;
		int i,j,m,n,x1,x2,y1,y2;
		unsigned char* ptem = new unsigned char[mWid*mHei];
		memset(ptem,0,sizeof(unsigned char)*mWid*mHei);
		unsigned char* pslipmast = new unsigned char[2*ts+1];
		memset(pslipmast,0,sizeof(unsigned char)*(2*ts+1));

		for( i=0; i<mHei; i+=1)
		{
			for( j=0; j<mWid; j+=1)
			{
				// clamp the window to valid pixel indices (inclusive bounds)
				x1 = ( (j-ts) < 0     ) ? 0        : (j-ts);
				x2 = ( (j+ts) >= mWid ) ? (mWid-1) : (j+ts);
				y1 = ( (i-ts) < 0     ) ? 0        : (i-ts);
				y2 = ( (i+ts) >= mHei ) ? (mHei-1) : (i+ts);
				mask = (x2-x1+1)*(y2-y1+1);
				tem_sum=0;

				//for (n=y1; n<=y2; n++)
				//for (m=x1; m<=x2; m++)
				//	tem_sum+=pInput[n*mWid+m]/255;					
				//
				//float rst = (float)tem_sum/mask;
				//if(rst>flg_f)
				//ptem[i*mWid+j]=255;	
				
				if( mask!=(ts+ts+1)*(ts+ts+1) )
				{
					for (n=y1; n<=y2; n++)
					for (m=x1; m<=x2; m++)
						tem_sum+=pInput[n*mWid+m]/255;					

					float rst = (float)tem_sum/mask;
					if(rst>flg_f)
					ptem[i*mWid+j]=255;	
				}
				else
				{
					if(x1==0)//new row
					{
						for (m=x1; m<=x2; m++)
						{
							pslipmast[m] = 0;						
							for (n=y1; n<=y2; n++)					//cal every pslipmast element
								pslipmast[m]+=pInput[n*mWid+m]/255;					
						}
						
						tem_sum = 0;								//cal rst
						for (int k=0; k<=x2-x1; k++)
							tem_sum += pslipmast[k];
						float rst = (float)tem_sum/mask;
						if(rst>flg_f)
							ptem[i*mWid+j]=255;	
					}
					else
					{
						for (int q=0; q<x2-x1; q++)					 //slip buffer 
							pslipmast[q] = pslipmast[q+1];

						m = x2;										//cal last element of the slip buffer
						pslipmast[x2-x1]=0;
						for (n=y1; n<=y2; n++)
							pslipmast[x2-x1] += pInput[n*mWid+m]/255;					
						
						tem_sum = 0;								//cal rst
						for (int k=0; k<=x2-x1; k++)
							tem_sum += pslipmast[k];
						float rst = (float)tem_sum/mask;
						if(rst>flg_f)
							ptem[i*mWid+j]=255;	
					}			
				}						
			}
		}

		//update input data
		memcpy(pInput,ptem,sizeof(unsigned char)*mWid*mHei);
		delete [] ptem;
		delete [] pslipmast;
	}
	else
	{
		IplImage* pFore = cvCreateImageHeader(cvSize(mWid,mHei), 8, 1);
		cvSetData(pFore, pInput, mWid);	// step: mWid bytes per row (single-channel image)
		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
		cvFindContours( pFore, storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
		for (seq = first_seq; seq; seq = seq->h_next)
		{
			CvContour* cnt = (CvContour*)seq;
			double area = cvContourArea( cnt, CV_WHOLE_SEQ );
			if (fabs(area) <= FILTER_SIZE)
			{
				prev_seq = seq->h_prev;
				if( prev_seq )
				{
					prev_seq->h_next = seq->h_next;
					if( seq->h_next ) seq->h_next->h_prev = prev_seq;
				}
				else
				{
					first_seq = seq->h_next;
					if( seq->h_next ) seq->h_next->h_prev = NULL;
				}
			}
		}
		cvZero(pFore);
		cvDrawContours(pFore, first_seq, CV_RGB(255, 255, 255), CV_RGB(255, 255, 255), 10, -1);
		cvReleaseImageHeader(&pFore);
		cvReleaseMemStorage(&storage);
	}
}
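The interior branch above avoids rescanning the whole (2ts+1)^2 window for every pixel: it keeps one running sum per column (pslipmast) and, when the window slides right, replaces only the newly entered column. A standalone sketch of that column-sum idea, with hypothetical names and a binary 0/255 input assumed:

// colsum[m] = number of set pixels in column m of the row band [y1, y2].
// Recomputing only one colsum entry per horizontal step turns the
// O((2ts+1)^2) window sum into an O(2ts+1) update.
void band_column_counts(const unsigned char* img, int width,
                        int y1, int y2, unsigned int* colsum)
{
    for (int m = 0; m < width; m++)
    {
        colsum[m] = 0;
        for (int n = y1; n <= y2; n++)
            colsum[m] += img[n * width + m] / 255;  // 0 or 1 per pixel
    }
}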
Example no. 3
// perform autocalibration using absolute quadric
bool mvg_autocalibration_2(CvMat ** Ps, double * principal_points, const size_t n, CvMat ** Xs, const size_t m, CvMat ** pi_infinity /*= NULL*/, bool affine /*= false*/)
{
	if (n < 3)
	{
		printf("at least three views must be selected\n");
		return false;
	}

	// create deep copy of the input data
	CvMat ** Ps_orig = ALLOC(CvMat *, n);
	for (size_t i = 0; i < n; i++)
	{
		Ps_orig[i] = Ps[i];
		Ps[i] = opencv_create_matrix(3, 4);
		cvCopy(Ps_orig[i], Ps[i]);
	}

	// move the principal point to the origin for every camera
	CvMat * T = opencv_create_matrix(3, 3);
	CvMat * S = opencv_create_matrix(3, 3);

	for (size_t i = 0; i < n; i++)
	{
		// set up the translation matrix
		cvZero(T);
		OPENCV_ELEM(T, 0, 0) = 1;
		OPENCV_ELEM(T, 1, 1) = 1;
		OPENCV_ELEM(T, 2, 2) = 1;
		OPENCV_ELEM(T, 0, 2) = -principal_points[2 * i + 0];
		OPENCV_ELEM(T, 1, 2) = -principal_points[2 * i + 1];

		// apply it to the projection matrix
		cvMatMul(T, Ps[i], Ps[i]);

		// also normalize scale
		cvZero(S);
		OPENCV_ELEM(S, 0, 0) = 0.001;
		OPENCV_ELEM(S, 1, 1) = 0.001;
		OPENCV_ELEM(S, 2, 2) = 1.0;
		cvMatMul(S, Ps[i], Ps[i]);
	}

	cvReleaseMat(&T); 
	cvReleaseMat(&S);

	// RANSAC paradigm state 
	const size_t samples = 3;
	int best_inliers_count = 0; 
	size_t best_inliers_ids[samples];
	bool 
		* best_inliers_marked = ALLOC(bool, n), 
		* inliers_marked = ALLOC(bool, n), 
		* sample_marked = ALLOC(bool, n)
	;
	memset(best_inliers_marked, 0, n * sizeof(bool));
	memset(best_inliers_ids, 0, samples * sizeof(size_t));

	// result
	CvMat 
		* pi_inf = NULL, 
		* H_rectify = NULL, 
		* H_rectify_inv = NULL, 
		* solution = NULL, 
		* W = NULL
	;

	// --- begin --- RANSAC paradigm iterator
	const int total = 500;
	for (int tries = 0; tries <= total; tries++) 
	{
	// --- end   --- RANSAC paradigm iterator

	// if this is the last RANSAC iteration, we'll use all inliers from the best sample
	memset(sample_marked, 0, n * sizeof(bool));
	
	if (tries == total) 
	{
		if (best_inliers_count >= 3) 
		{
			// use all inliers 
			memcpy(sample_marked, best_inliers_marked, n * sizeof(bool));
			printf("Last iteration, using the following sample: ");
			for (size_t i = 0; i < n; i++) { printf(sample_marked[i] ? "T" : "F"); }
			printf("\n");
		}
		else
		{
			// fallback method - use all shots
			printf("Failed to find consistent sample when autocalibrating! Using all shots!\n");
			memset(sample_marked, ~0, n * sizeof(bool));
			best_inliers_count = n;
		}
	}
	else
	{
		// generate sample 
		memset(sample_marked, 0, n * sizeof(bool)); 
		for (int count = 0; count < samples;)
		{
			const int pick = rand() % n; 
			if (!sample_marked[pick]) 
			{
				sample_marked[pick] = true;
				count++;
			}
		}
	}

	// fill the matrix W containing linear equations determining Q
	W = opencv_create_matrix(4 * (tries == total ? best_inliers_count : samples), 10);
	cvZero(W);
	int i = 0;
	for (size_t j = 0; j < n; j++)
	{
		if (!sample_marked[j]) continue;

		// shortcut for P 
		CvMat * const P = Ps[j];

		// (P * Omega * P_t)_12 = 0
		OPENCV_ELEM(W, i, 0) = q(P, 1, 2, 0);
		OPENCV_ELEM(W, i, 1) = q(P, 1, 2, 1);
		OPENCV_ELEM(W, i, 2) = q(P, 1, 2, 2);
		OPENCV_ELEM(W, i, 3) = q(P, 1, 2, 3);
		OPENCV_ELEM(W, i, 4) = q(P, 1, 2, 4);
		OPENCV_ELEM(W, i, 5) = q(P, 1, 2, 5);
		OPENCV_ELEM(W, i, 6) = q(P, 1, 2, 6);
		OPENCV_ELEM(W, i, 7) = q(P, 1, 2, 7);
		OPENCV_ELEM(W, i, 8) = q(P, 1, 2, 8);
		OPENCV_ELEM(W, i, 9) = q(P, 1, 2, 9);
		i++;

		// (P * Omega * P_t)_13 = 0
		OPENCV_ELEM(W, i, 0) = q(P, 1, 3, 0);
		OPENCV_ELEM(W, i, 1) = q(P, 1, 3, 1);
		OPENCV_ELEM(W, i, 2) = q(P, 1, 3, 2);
		OPENCV_ELEM(W, i, 3) = q(P, 1, 3, 3);
		OPENCV_ELEM(W, i, 4) = q(P, 1, 3, 4);
		OPENCV_ELEM(W, i, 5) = q(P, 1, 3, 5);
		OPENCV_ELEM(W, i, 6) = q(P, 1, 3, 6);
		OPENCV_ELEM(W, i, 7) = q(P, 1, 3, 7);
		OPENCV_ELEM(W, i, 8) = q(P, 1, 3, 8);
		OPENCV_ELEM(W, i, 9) = q(P, 1, 3, 9);
		i++;

		// (P * Omega * P_t)_23 = 0
		OPENCV_ELEM(W, i, 0) = q(P, 2, 3, 0);
		OPENCV_ELEM(W, i, 1) = q(P, 2, 3, 1);
		OPENCV_ELEM(W, i, 2) = q(P, 2, 3, 2);
		OPENCV_ELEM(W, i, 3) = q(P, 2, 3, 3);
		OPENCV_ELEM(W, i, 4) = q(P, 2, 3, 4);
		OPENCV_ELEM(W, i, 5) = q(P, 2, 3, 5);
		OPENCV_ELEM(W, i, 6) = q(P, 2, 3, 6);
		OPENCV_ELEM(W, i, 7) = q(P, 2, 3, 7);
		OPENCV_ELEM(W, i, 8) = q(P, 2, 3, 8);
		OPENCV_ELEM(W, i, 9) = q(P, 2, 3, 9);
		i++;

		// (P * Omega * P_t)_11 - (P * Omega * P_t)_22 = 0
		OPENCV_ELEM(W, i, 0) = q(P, 1, 1, 0) - q(P, 2, 2, 0);
		OPENCV_ELEM(W, i, 1) = q(P, 1, 1, 1) - q(P, 2, 2, 1);
		OPENCV_ELEM(W, i, 2) = q(P, 1, 1, 2) - q(P, 2, 2, 2);
		OPENCV_ELEM(W, i, 3) = q(P, 1, 1, 3) - q(P, 2, 2, 3);
		OPENCV_ELEM(W, i, 4) = q(P, 1, 1, 4) - q(P, 2, 2, 4);
		OPENCV_ELEM(W, i, 5) = q(P, 1, 1, 5) - q(P, 2, 2, 5);
		OPENCV_ELEM(W, i, 6) = q(P, 1, 1, 6) - q(P, 2, 2, 6);
		OPENCV_ELEM(W, i, 7) = q(P, 1, 1, 7) - q(P, 2, 2, 7);
		OPENCV_ELEM(W, i, 8) = q(P, 1, 1, 8) - q(P, 2, 2, 8);
		OPENCV_ELEM(W, i, 9) = q(P, 1, 1, 9) - q(P, 2, 2, 9);
		i++;
	}

	// solve the system
	solution = opencv_right_null_vector(W);
	cvReleaseMat(&W);

	// construct Q 
	CvMat * Q_temp = opencv_create_matrix(4, 4);
	int si = 0;
	for (int i = 0; i < 4; i++) 
	{
		for (int j = i; j < 4; j++) 
		{
			OPENCV_ELEM(Q_temp, i, j) = OPENCV_ELEM(solution, si, 0);
			if (i != j) OPENCV_ELEM(Q_temp, j, i) = OPENCV_ELEM(solution, si, 0);
			si++;
		}
	}

	cvReleaseMat(&solution);

	// SVD decomposition 
	CvMat * D = opencv_create_matrix(4, 4), * V = opencv_create_matrix(4, 4), * U = opencv_create_matrix(4, 4);
	cvSVD(Q_temp, D, U, V, CV_SVD_V_T);
	cvReleaseMat(&Q_temp);
	OPENCV_ELEM(D, 3, 3) = 0; // Q has rank 3
	CvMat * Q_star_inf = opencv_create_matrix(4, 4);
	cvZero(Q_star_inf);
	cvMatMul(U, D, Q_star_inf);
	cvMatMul(Q_star_inf, V, Q_star_inf);

	// now find the pi_inf
	pi_inf = opencv_right_null_vector(Q_star_inf);
	H_rectify = opencv_create_I_matrix(4);
	H_rectify_inv = NULL;

	if (!affine) 
	{
		// full metric reconstruction
		CvMat * E_U = opencv_create_matrix(4, 4), * E_V = opencv_create_matrix(4, 4), * E_D = opencv_create_matrix(4, 1);
		cvSVD(Q_star_inf, E_D, E_U, E_V, CV_SVD_V_T);
		for (int j = 0; j < 3; j++)
		{
			for (int i = 0; i < 4; i++) 
			{
				OPENCV_ELEM(E_U, i, j) *= sqrt(OPENCV_ELEM(E_D, j, 0));
			}
		}
		cvInvert(E_U, H_rectify);
		H_rectify_inv = E_U;
		cvReleaseMat(&E_V);
		cvReleaseMat(&E_D);
	}
	else
	{
		// affine rectification: only the plane at infinity is fixed
		for (int i = 0; i < 4; i++) { OPENCV_ELEM(H_rectify, 3, i) = OPENCV_ELEM(pi_inf, i, 0); }
		H_rectify_inv = opencv_create_matrix(4, 4); // cvInvert needs an allocated destination
		cvInvert(H_rectify, H_rectify_inv);
	}

	if (tries == total) 
	{
		// apply it to all points
		for (int i = 0; i < m; i++) 
		{
			cvMatMul(H_rectify, Xs[i], Xs[i]);
		}
	
		// apply it to original matrices 
		for (int i = 0; i < n; i++)
		{
			cvMatMul(Ps_orig[i], H_rectify_inv, Ps_orig[i]);
		}
	}

	// now calculate principal points of all the matrices and count inliers
	CvMat 
		* rectified_K = opencv_create_matrix(3, 3),
		* rectified_R = opencv_create_matrix(3, 3),
		* rectified_T = opencv_create_matrix(3, 1)
	;

	memset(inliers_marked, 0, n * sizeof(bool));
	size_t inliers_count = 0;
	CvMat * P_temp = opencv_create_matrix(3, 4); 

	printf("[");
	for (int i = 0; i < n; i++) 
	{
		// at the end of the final iteration, the matrices Ps_orig are already rectified
		if (tries < total) 
		{
			cvMatMul(Ps_orig[i], H_rectify_inv, P_temp);
		}
		else
		{
			cvCopy(Ps_orig[i], P_temp);
		}

		const bool decomposed = mvg_finite_projection_matrix_decomposition(P_temp, rectified_K, rectified_R, rectified_T);

		if (decomposed) 
		{
			const double PP_distance = sqrt( 
				sq(OPENCV_ELEM(rectified_K, 0, 2) - principal_points[2 * i + 0]) + 
				sq(OPENCV_ELEM(rectified_K, 1, 2) - principal_points[2 * i + 1])
			); 

			if (PP_distance < 100) 
			{
				inliers_count++;
				inliers_marked[i] = true;
			}
			else
			{
				inliers_marked[i] = false;
			}
			// test
			// inliers_marked[i] = sample_marked[i];
			printf("%f ", PP_distance);
		}
		else
		{
			inliers_marked[i] = false;
			printf("failed to extract PP\n");
		}
	}

	cvReleaseMat(&P_temp);

	// debug output
	printf("] ");
	for (size_t i = 0; i < n; i++) { printf(inliers_marked[i] ? "T" : "F"); }
	printf(" %d ", inliers_count); 
	printf("\n");

	// set the best result 
	if (inliers_count > best_inliers_count) 
	{
		memcpy(best_inliers_marked, inliers_marked, n * sizeof(bool)); 
		best_inliers_count = inliers_count; 
	}

	// TODO release allocated matrices 
	cvReleaseMat(&rectified_K);
	cvReleaseMat(&rectified_R);
	cvReleaseMat(&rectified_T);

	// --- begin --- RANSAC paradigm iterator close
	}
	printf("\n");
	// --- end   --- RANSAC paradigm iterator close

	// release memory
	if (pi_infinity) 
	{
		*pi_infinity = pi_inf;
	}
	else
	{
		cvReleaseMat(&pi_inf);
	}

	for (size_t i = 0; i < n; i++) 
	{
		cvReleaseMat(Ps + i);
	}
	
	cvReleaseMat(&H_rectify);
	cvReleaseMat(&H_rectify_inv);
	cvReleaseMat(&solution);
	cvReleaseMat(&W);

	return true;
}
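For reference, the four rows that the loop above appends per camera come from the dual image of the absolute conic (standard absolute-quadric autocalibration, cf. Hartley and Zisserman, "Multiple View Geometry"). With the principal point moved to the origin, and assuming zero skew and square pixels, each calibration matrix is K_i = diag(f_i, f_i, 1), so

\[
\omega_i^* = K_i K_i^\top = P_i \, Q_\infty^* \, P_i^\top \;\propto\; \operatorname{diag}(f_i^2,\; f_i^2,\; 1)
\quad\Longrightarrow\quad
(\omega_i^*)_{12} = (\omega_i^*)_{13} = (\omega_i^*)_{23} = 0,
\qquad (\omega_i^*)_{11} - (\omega_i^*)_{22} = 0 .
\]

Each constraint is linear in the ten distinct entries of the symmetric 4x4 quadric Q_infinity, which is exactly what q(P, i, j, k) expands into one row of W.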
Example no. 4
int run_calibration( CvSeq* image_points_seq, CvSize img_size, CvSize board_size,
                     float square_size, float aspect_ratio, int flags,
                     CvMat* camera_matrix, CvMat* dist_coeffs, CvMat** extr_params,
                     CvMat** reproj_errs, double* avg_reproj_err )
{
    int code;
    int image_count = image_points_seq->total;
    int point_count = board_size.width*board_size.height;
    CvMat* image_points = cvCreateMat( 1, image_count*point_count, CV_32FC2 );
    CvMat* object_points = cvCreateMat( 1, image_count*point_count, CV_32FC3 );
    CvMat* point_counts = cvCreateMat( 1, image_count, CV_32SC1 );
    CvMat rot_vects, trans_vects;
    int i, j, k;
    CvSeqReader reader;
    cvStartReadSeq( image_points_seq, &reader );

    // initialize arrays of points
    for( i = 0; i < image_count; i++ )
    {
        CvPoint2D32f* src_img_pt = (CvPoint2D32f*)reader.ptr;
        CvPoint2D32f* dst_img_pt = ((CvPoint2D32f*)image_points->data.fl) + i*point_count;
        CvPoint3D32f* obj_pt = ((CvPoint3D32f*)object_points->data.fl) + i*point_count;

        for( j = 0; j < board_size.height; j++ )
            for( k = 0; k < board_size.width; k++ )
            {
                *obj_pt++ = cvPoint3D32f(j*square_size, k*square_size, 0);
                *dst_img_pt++ = *src_img_pt++;
            }
        CV_NEXT_SEQ_ELEM( image_points_seq->elem_size, reader );
    }

    cvSet( point_counts, cvScalar(point_count) );

    *extr_params = cvCreateMat( image_count, 6, CV_32FC1 );
    cvGetCols( *extr_params, &rot_vects, 0, 3 );
    cvGetCols( *extr_params, &trans_vects, 3, 6 );

    cvZero( camera_matrix );
    cvZero( dist_coeffs );

    if( flags & CV_CALIB_FIX_ASPECT_RATIO )
    {
        camera_matrix->data.db[0] = aspect_ratio;
        camera_matrix->data.db[4] = 1.;
    }

    cvCalibrateCamera2( object_points, image_points, point_counts,
                        img_size, camera_matrix, dist_coeffs,
                        &rot_vects, &trans_vects, flags );

    code = cvCheckArr( camera_matrix, CV_CHECK_QUIET ) &&
        cvCheckArr( dist_coeffs, CV_CHECK_QUIET ) &&
        cvCheckArr( *extr_params, CV_CHECK_QUIET );

    *reproj_errs = cvCreateMat( 1, image_count, CV_64FC1 );
    *avg_reproj_err =
        compute_reprojection_error( object_points, &rot_vects, &trans_vects,
            camera_matrix, dist_coeffs, image_points, point_counts, *reproj_errs );


    fprintf( stderr, " Rot : %f\n",rot_vects.data.fl[0]); fprintf( stderr, "%f\n",rot_vects.data.fl[1]); fprintf( stderr, "%f\n",rot_vects.data.fl[2]);
    fprintf( stderr, " Tra : %f\n",trans_vects.data.fl[0]); fprintf( stderr, "%f\n",trans_vects.data.fl[1]); fprintf( stderr, "%f\n",trans_vects.data.fl[2]);


    cvReleaseMat( &object_points );
    cvReleaseMat( &image_points );
    cvReleaseMat( &point_counts );

    return code;
}
Example no. 5
/**
   Initializes all variables that don't need to get updated for each flow calculation.
   Note: Not much error checking is done, all inputs should be > 0

   @param[in] width_in   Width of images that will be used for calculation
   @param[in] height_in   Height of images that will be used for calculation
   @param[in] max_level_in   The maximum level that will be reached in the multigrid algorithm, higher maximum level = coarser level reached
   @param[in] start_level_in   The starting level used as the base in the multigrid algorithm, higher start level = coarser starting level
   @param[in] n1_in   Number of pre-smoothing steps in the multigrid cycle
   @param[in] n2_in   Number of post-smoothing steps in the multigrid cycle
   @param[in] rho_in   Gaussian smoothing parameter
   @param[in] alpha_in   Regularisation parameter in the energy functional
   @param[in] sigma_in   Gaussian smoothing parameter

*/
VarFlow::VarFlow(int width_in, int height_in, int max_level_in, int start_level_in, int n1_in, int n2_in,
                float rho_in, float alpha_in, float sigma_in){
					
	max_level = max_level_in;
    start_level = start_level_in;
    
    if(max_level < start_level)
    {
        max_level = start_level;
        std::cout << "Warning: input max_level < start_level, correcting (new value = " << max_level << ")" << std::endl;
    }
	
	//Width and height of the largest image in the multigrid cycle, based on external input image dimensions
	//and the desired starting level
	int width = (int)floor(width_in/pow((float)2.0,(float)(start_level)));
    int height = (int)floor(height_in/pow((float)2.0,(float)(start_level)));
    
    // start_level too large, correct it
    if(width < 1 || height < 1)
    {
        if(width < 1)
        {
              start_level	= (int)floor(log(static_cast<float>(width_in)) / log(2.0));
              width			= (int)floor(width_in / pow((float)2.0,(float)(start_level)));
              height		= (int)floor(height_in / pow((float)2.0,(float)(start_level)));
        }
        
        if(height < 1)
        {
              start_level   = (int)floor(log(static_cast<float>(height_in)) / log(2.0));
              width			= (int)floor(width_in/pow((float)2.0,(float)(start_level)));
              height		= (int)floor(height_in/pow((float)2.0,(float)(start_level)));
        }
    
        // Correct max_level as well
        max_level = start_level;
        std::cout << "Warning: start_level too large, correcting start_level and max_level (new value = " << start_level << ")" << std::endl;
        
    }
    
    int width_end = (int)floor(width_in/pow((float)2.0,(float)(max_level)));
    int height_end = (int)floor(height_in/pow((float)2.0,(float)(max_level)));
    
    // max_level too large, correct it
    if(width_end < 1 || height_end < 1)
    {
        if(width_end < 1)
        {
              max_level = (int)floor(log(static_cast<float>(width_in)) / log(2.0));
              height_end = (int)floor(height_in/pow((float)2.0,(float)(max_level)));
        }
        
        if(height_end < 1)
        {
              max_level = (int)floor(log(static_cast<float>(height_in)) / log(2.0));
        }
        
	std::cout<<"Warning: max_level too large, correcting (new value = "<<max_level<<")"<< std::endl;
        
    }
          
             
    n1 = n1_in;
    n2 = n2_in;
    
    rho = rho_in;
    alpha = alpha_in;
    sigma = sigma_in;
    
    // Spatial derivative masks
    mask_x[0] = 0.08333;
    mask_x[1] = -0.66666;
    mask_x[2] = 0;
    mask_x[3] = 0.66666;
    mask_x[4] = -0.08333;
    
    mask_y[0] = -0.08333;
    mask_y[1] = 0.66666;
    mask_y[2] = 0;
    mask_y[3] = -0.66666;
    mask_y[4] = 0.08333;
    
    fx_mask = cvMat(1, 5, CV_32F, mask_x);
    fy_mask = cvMat(5, 1, CV_32F, mask_y);
    
    //Resized input images will be stored in these variables
    imgAsmall = cvCreateImage(cvSize(width, height), 8, 1);
    imgBsmall = cvCreateImage(cvSize(width, height), 8, 1);
    
    //Float representations of resized input images
    imgAfloat = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    imgBfloat = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    
    //Spatial and temporal derivatives of input image A
    imgAfx = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    imgAfy = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    imgAft = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    
    //Arrays to hold images of various sizes used in the multigrid cycle
    imgAfxfx_array = new IplImage*[max_level-start_level+1];  
    imgAfxfy_array = new IplImage*[max_level-start_level+1];  
    imgAfxft_array = new IplImage*[max_level-start_level+1];  
    imgAfyfy_array = new IplImage*[max_level-start_level+1];  
    imgAfyft_array = new IplImage*[max_level-start_level+1];  
    
    imgU_array = new IplImage*[max_level-start_level+1];  
    imgV_array = new IplImage*[max_level-start_level+1];  
    imgU_res_err_array = new IplImage*[max_level-start_level+1];  
    imgV_res_err_array = new IplImage*[max_level-start_level+1];  

    int i;
    
    //Allocate memory for image arrays
    for(i = 0; i < (max_level-start_level+1); i++){
        
        imgAfxfx_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgAfxfy_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgAfxft_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgAfyfy_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgAfyft_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
    
        imgU_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgV_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgU_res_err_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgV_res_err_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
    
        cvZero(imgU_array[i]);
        cvZero(imgV_array[i]);
        cvZero(imgU_res_err_array[i]);
        cvZero(imgV_res_err_array[i]);
       
    }
    
    initialized = 1;
    
}
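A minimal construction sketch; all parameter values below are illustrative only (the flow-computation method itself is not part of the constructor shown above):

// 640x480 input, full-resolution base level, coarsest level 4,
// two pre- and two post-smoothing steps per multigrid cycle.
VarFlow flow(640, 480, /*max_level_in=*/4, /*start_level_in=*/0,
             /*n1_in=*/2, /*n2_in=*/2,
             /*rho_in=*/2.8f, /*alpha_in=*/1500.0f, /*sigma_in=*/1.5f);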
Example no. 6
// Parameters:
// img - input video frame; dst - detection result
void Invade::update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
	double timestamp = clock() / 100.; // timestamp in clock ticks / 100 (clock()/CLOCKS_PER_SEC would give true seconds)
	CvSize size = cvSize(img->width, img->height); // get current frame size
	int i, idx1, idx2;
	IplImage* silh;
	IplImage* pyr = cvCreateImage(cvSize((size.width & -2) / 2, (size.height & -2) / 2), 8, 1);
	CvMemStorage *stor;
	CvSeq *cont;

	/* initialize the data first */
	if (!mhi || mhi->width != size.width || mhi->height != size.height)
	{
		if (buf == 0) // allocate memory if it has not been initialized yet
		{
			buf = (IplImage**)malloc(N*sizeof(buf[0]));
			memset(buf, 0, N*sizeof(buf[0]));
		}

		for (i = 0; i < N; i++)
		{
			cvReleaseImage(&buf[i]);
			buf[i] = cvCreateImage(size, IPL_DEPTH_8U, 1);
			cvZero(buf[i]);// clear Buffer Frame at the beginning 
		}
		cvReleaseImage(&mhi);
		mhi = cvCreateImage(size, IPL_DEPTH_32F, 1);
		cvZero(mhi); // clear MHI at the beginning 
	} // end of if(mhi) 

	/* convert the current frame to grayscale and store it in the last buffer frame */
	cvCvtColor(img, buf[last], CV_BGR2GRAY); // convert frame to grayscale 

	/* set the frame indices */
	idx1 = last;
	idx2 = (last + 1) % N; // index of (last - (N-1))th frame 
	last = idx2;

	// frame differencing
	silh = buf[idx2]; // the difference image is written into buf[idx2]
	cvAbsDiff(buf[idx1], buf[idx2], silh); // get difference between frames 

	// binarize the difference image
	cvThreshold(silh, silh, 50, 255, CV_THRESH_BINARY); // threshold it

	// drop stale motion to update the motion history image
	cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI 

	cvConvert(mhi, dst); // convert mhi into dst (dst = mhi)

	// median filter to remove small noise
	cvSmooth(dst, dst, CV_MEDIAN, 3, 0, 0, 0);

	cvPyrDown(dst, pyr, CV_GAUSSIAN_5x5); // downsample to suppress noise; the result is a quarter of the original
	cvDilate(pyr, pyr, 0, 1); // dilate to close discontinuous holes in the target
	cvPyrUp(pyr, dst, CV_GAUSSIAN_5x5); // upsample to restore the original image size

	// the following section finds the contours
	// Create dynamic structure and sequence. 
	stor = cvCreateMemStorage(0);
	cont = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), stor);

	// find all contours
	cvFindContours(dst, stor, &cont, sizeof(CvContour),
		CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	// draw the contours directly from the rect stored in CvContour
	for (; cont; cont = cont->h_next)
	{
		CvRect r = ((CvContour*)cont)->rect;
		if (r.height * r.width > CONTOUR_MAX_AERA) // discard rectangles with small area
		{
			cvRectangle(img, cvPoint(r.x, r.y),
				cvPoint(r.x + r.width, r.y + r.height),
				CV_RGB(255, 0, 0), 1, CV_AA, 0);
		}
	}
	// free memory 
	cvReleaseMemStorage(&stor);
	cvReleaseImage(&pyr);
}
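A minimal capture loop around update_mhi; the Invade instance is assumed to be default-constructible, and the window name and threshold value are illustrative:

CvCapture* cap = cvCaptureFromCAM(0);
IplImage* frame = cvQueryFrame(cap);   // first frame, used only for sizing
// dst must be 8-bit single-channel: update_mhi converts the float MHI into it
// and then runs the smoothing/contour steps on it.
IplImage* dst = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
Invade invade;
cvNamedWindow("motion", CV_WINDOW_AUTOSIZE);
while ((frame = cvQueryFrame(cap)) != NULL)
{
    invade.update_mhi(frame, dst, 30);   // detected regions are drawn into frame
    cvShowImage("motion", dst);
    if (cvWaitKey(10) == 27) break;      // ESC quits
}
cvReleaseImage(&dst);
cvReleaseCapture(&cap);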
Example no. 7
int WINAPI WinMain(HINSTANCE hThisInstance, HINSTANCE hPrevInstance, LPSTR lpszArgs, int nWinMode)
{
	// variables for storing images
	IplImage *frame = 0, *image = 0, *hsv = 0, *dst = 0, *dst2 = 0, *color_indexes = 0, *dst3 = 0, *image2 = 0, *tmp = 0;
	int key = 0, zx = 0, zy = 0;

	// load the picture from file
	IplImage *menu = cvLoadImage("menu.png");
	// create the project's main window
	cvNamedWindow("Проект OpenCV");
	cvShowImage("Проект OpenCV",menu);
	cvMoveWindow("Проект OpenCV",100,50);

	// grab any connected webcam
    CvCapture *capture = cvCaptureFromCAM(CV_CAP_ANY);

	// frame rate
	double fps = 18;
	// initialize video recording to file; the 4-character codec code is built with the CV_FOURCC macro
	CvVideoWriter *writer = cvCreateVideoWriter("record.avi", CV_FOURCC('I','Y','U','V'), fps, cvSize(640, 480), 1);

	if (!capture)
		return 0;
	else
	{
		while(key != 27)
		{
			// grab the current frame
			frame = cvQueryFrame(capture);
			// copy it for processing
			image = cvCloneImage(frame);
	
			// zoom
			if(key=='+')
			{
					zx = zx + 4;
					zy = zy + 3;
			}
			if(key=='-')
			{
					zx = zx - 4;
					zy = zy - 3;
			}
			if(zx > 300)
			{
					zx = 300;
					zy = 225;
			}
			if(zx < 0)
			{
					zx = 0;
					zy = 0;
			}

			// set the ROI width and height
			int zwidth = frame->width-2*zx; 
			int zheight = frame->height-2*zy;

			// set the ROI (Region Of Interest)
			cvSetImageROI(frame, cvRect(zx,zy,zwidth,zheight));
			// copy the region of interest into image2
			image2 = cvCloneImage(frame); 
			// create an empty 640x480 image
			tmp = cvCreateImage( cvSize(640, 480), frame->depth, frame->nChannels );
			// resize the ROI onto the blank image tmp
			cvResize(image2, tmp, 0);

			// write the frame to the video file
            cvWriteFrame(writer, tmp);

			// reset the ROI
			cvResetImageROI(frame);

			// initialize the font
			CvFont font;
			cvInitFont( &font, CV_FONT_HERSHEY_COMPLEX,1.0, 1.0, 0, 1, CV_AA);
			// draw text onto the image using the font
			cvPutText(tmp, "press '+' to increase", cvPoint(150, 40), &font, CV_RGB(150, 0, 150) );
			cvPutText(tmp, "press '-' to reduce", cvPoint(165, 450), &font, CV_RGB(150, 0, 150) );

			// number of pixels of each color type in the image
			uint colorCount[NUM_COLOR_TYPES] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

			hsv = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 ); 
			cvCvtColor( image, hsv, CV_BGR2HSV );

			// images for storing results
			dst = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 ); 
			dst2 = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
			color_indexes = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); // for storing color indices

			// for storing RGB colors
			CvScalar rgb_colors[NUM_COLOR_TYPES];

			int i=0, j=0, x=0, y=0;

			// zero out the colors
			for(i=0; i<NUM_COLOR_TYPES; i++) {
					rgb_colors[i] = cvScalarAll(0);
			}

			for (y=0; y<hsv->height; y++) {
					for (x=0; x<hsv->width; x++) {

							// get the pixel's HSV components
							uchar H = CV_PIXEL(uchar, hsv, x, y)[0];        // Hue
							uchar S = CV_PIXEL(uchar, hsv, x, y)[1];        // Saturation
							uchar V = CV_PIXEL(uchar, hsv, x, y)[2];        // Value (Brightness)

							// determine which color these values map to
							int ctype = getPixelColorType(H, S, V);

							// set this color in the debug image
							CV_PIXEL(uchar, dst, x, y)[0] = cCTHue[ctype];  // Hue
							CV_PIXEL(uchar, dst, x, y)[1] = cCTSat[ctype];  // Saturation
							CV_PIXEL(uchar, dst, x, y)[2] = cCTVal[ctype];  // Value

							// accumulate the RGB components
							rgb_colors[ctype].val[0] += CV_PIXEL(uchar, image, x, y)[0]; // B
							rgb_colors[ctype].val[1] += CV_PIXEL(uchar, image, x, y)[1]; // G
							rgb_colors[ctype].val[2] += CV_PIXEL(uchar, image, x, y)[2]; // R

							// store which color type the pixel belongs to
							CV_PIXEL(uchar, color_indexes, x, y)[0] = ctype;

							// count it
							colorCount[ctype]++;
					}
			}

			// average the RGB components (skip colors that never occurred to avoid division by zero)
			for(i=0; i<NUM_COLOR_TYPES; i++) {
					if (colorCount[i] == 0) continue;
					rgb_colors[i].val[0] /= colorCount[i];
					rgb_colors[i].val[1] /= colorCount[i];
					rgb_colors[i].val[2] /= colorCount[i];
			}

			// now push the array into a vector and sort it
			std::vector< std::pair< int, uint > > colors;
			colors.reserve(NUM_COLOR_TYPES);

			for(i=0; i<NUM_COLOR_TYPES; i++){
					std::pair< int, uint > color;
					color.first = i;
					color.second = colorCount[i];
					colors.push_back( color );
			}
		
			// sort
			std::sort( colors.begin(), colors.end(), colors_sort );

			// show the colors
			cvZero(dst2);
			int h = dst2->height;
			int w = dst2->width / RECT_COLORS_SIZE;
			for(i=0; i<RECT_COLORS_SIZE; i++ ){
					cvRectangle(dst2, cvPoint(i*w, 0), cvPoint(i*w+w, h), rgb_colors[colors[i].first], -1);
			}

			// show the image in the detected colors
			dst3 = cvCloneImage(image);
			for (y=0; y<dst3->height; y++) {
					for (x=0; x<dst3->width; x++) {
							int color_index = CV_PIXEL(uchar, color_indexes, x, y)[0];

							CV_PIXEL(uchar, dst3, x, y)[0] = rgb_colors[color_index].val[0];
							CV_PIXEL(uchar, dst3, x, y)[1] = rgb_colors[color_index].val[1];
							CV_PIXEL(uchar, dst3, x, y)[2] = rgb_colors[color_index].val[2];
					}
			}

			// convert the debug image back to RGB
			cvCvtColor( dst, dst, CV_HSV2BGR );

			cvSetMouseCallback("Проект OpenCV", ClickOnMenu, (void*) menu);

			if(flag_1 == 1)
			{
				cvNamedWindow("Веб-камера", CV_WINDOW_AUTOSIZE);
				cvShowImage("Веб-камера", image);
			}
			else cvDestroyWindow("Веб-камера");
			if(flag_2 == 1)
			{
				cvNamedWindow("Zoom", CV_WINDOW_AUTOSIZE);
				cvShowImage("Zoom", tmp);
			}
			else cvDestroyWindow("Zoom");
			if(flag_3 == 1)
			{
				cvNamedWindow("Обнаруженные цвета");
				cvShowImage("Обнаруженные цвета", dst2);
			}
			else cvDestroyWindow("Обнаруженные цвета");
			if(flag_4 == 1)
			{
				cvNamedWindow("Изображение в обнаруженных цветах");
				cvShowImage("Изображение в обнаруженных цветах", dst3);
			}
			else cvDestroyWindow("Изображение в обнаруженных цветах");
			if(flag_5 == 1)
			{
				cvNamedWindow("Из HSV в RGB");
				cvShowImage("Из HSV в RGB", dst);
			}
			else cvDestroyWindow("Из HSV в RGB");
	
			// release resources
			cvReleaseImage(&hsv);
			cvReleaseImage(&dst);
			cvReleaseImage(&dst2);
			cvReleaseImage(&color_indexes);
			cvReleaseImage(&dst3);
			cvReleaseImage(&image);
			cvReleaseImage(&image2);
			cvReleaseImage(&tmp);

			if(flag_exit == 1)
			{
				cvReleaseCapture(&capture);
				cvReleaseVideoWriter(&writer); // close the video file
				return 0;
			}

			// exit the loop if ESC was pressed
			key = cvWaitKey(1);
		}

		// release the variables initialized earlier
		cvReleaseCapture(&capture);
		cvReleaseVideoWriter(&writer);

	}
    return 0;
}
Example no. 8
int CvMLData::read_csv(const char* filename)
{
    const int M = 1000000;
    const char str_delimiter[3] = { ' ', delimiter, '\0' };
    FILE* file = 0;
    CvMemStorage* storage;
    CvSeq* seq;
    char *ptr;
    float* el_ptr;
    CvSeqReader reader;
    int cols_count = 0;    
    uchar *var_types_ptr = 0;

    clear();

    file = fopen( filename, "rt" );
    
    if( !file )
        return -1;

    // read the first line and determine the number of variables
    std::vector<char> _buf(M);
    char* buf = &_buf[0];
    if( !fgets_chomp( buf, M, file ))
    {
        fclose(file);
        return -1;
    }
    for( ptr = buf; *ptr != '\0'; ptr++ )
        cols_count += (*ptr == delimiter);

    if ( cols_count == 0)
    {
        fclose(file);
        return -1;
    }
    cols_count++;

    // create temporary memory storage to store the whole database
    el_ptr = new float[cols_count];
    storage = cvCreateMemStorage();
    seq = cvCreateSeq( 0, sizeof(*seq), cols_count*sizeof(float), storage );

    var_types = cvCreateMat( 1, cols_count, CV_8U );
    cvZero( var_types );
    var_types_ptr = var_types->data.ptr;

    for(;;)
    {
        char *token = NULL;
        int type;
        token = strtok(buf, str_delimiter);
        if (!token) 
        {
             fclose(file);
             cvReleaseMemStorage( &storage ); // avoid leaking the temporary storage on parse failure
             delete [] el_ptr;
             return -1;
        }
        for (int i = 0; i < cols_count-1; i++)
        {
            str_to_flt_elem( token, el_ptr[i], type);
            var_types_ptr[i] |= type;
            token = strtok(NULL, str_delimiter);
            if (!token)
            {
                fclose(file);
                cvReleaseMemStorage( &storage );
                delete [] el_ptr;
                return -1;
            }
        }
        str_to_flt_elem( token, el_ptr[cols_count-1], type);
        var_types_ptr[cols_count-1] |= type;
        cvSeqPush( seq, el_ptr );
        if( !fgets_chomp( buf, M, file ) || !strchr( buf, delimiter ) )
            break;
    }
    fclose(file);

    values = cvCreateMat( seq->total, cols_count, CV_32FC1 );
    missing = cvCreateMat( seq->total, cols_count, CV_8U );
    var_idx_mask = cvCreateMat( 1, values->cols, CV_8UC1 );
    cvSet( var_idx_mask, cvRealScalar(1) );
    train_sample_count = seq->total;

    cvStartReadSeq( seq, &reader );
    for(int i = 0; i < seq->total; i++ )
    {
        const float* sdata = (float*)reader.ptr;
        float* ddata = values->data.fl + cols_count*i;
        uchar* dm = missing->data.ptr + cols_count*i;

        for( int j = 0; j < cols_count; j++ )
        {
            ddata[j] = sdata[j];
            dm[j] = ( fabs( MISS_VAL - sdata[j] ) <= FLT_EPSILON );
        }
        CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
    }

    if ( cvNorm( missing, 0, CV_L1 ) <= FLT_EPSILON )
        cvReleaseMat( &missing );

    cvReleaseMemStorage( &storage );
    delete []el_ptr;
    return 0;
}
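Typical use of read_csv through CvMLData's public interface (set_delimiter and get_values belong to the same class in OpenCV's ml module; the file name is illustrative):

CvMLData data;
data.set_delimiter(',');                     // the parser splits on space and this character
if (data.read_csv("train.csv") == 0)         // returns 0 on success, -1 on failure
{
    const CvMat* values = data.get_values(); // samples x columns, CV_32FC1
    printf("%d samples, %d columns\n", values->rows, values->cols);
}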
Example no. 9
CV_IMPL CvKalman*
cvCreateKalman( int DP, int MP, int CP )
{
    CvKalman *kalman = 0;

    if( DP <= 0 || MP <= 0 )
        CV_Error( CV_StsOutOfRange,
        "state and measurement vectors must have positive number of dimensions" );

    if( CP < 0 )
        CP = DP;

    /* allocating memory for the structure */
    kalman = (CvKalman *)cvAlloc( sizeof( CvKalman ));
    memset( kalman, 0, sizeof(*kalman));

    kalman->DP = DP;
    kalman->MP = MP;
    kalman->CP = CP;

    kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 );
    cvZero( kalman->state_pre );

    kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 );
    cvZero( kalman->state_post );

    kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 );
    cvSetIdentity( kalman->transition_matrix );

    kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 );
    cvSetIdentity( kalman->process_noise_cov );

    kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 );
    cvZero( kalman->measurement_matrix );

    kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 );
    cvSetIdentity( kalman->measurement_noise_cov );

    kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 );

    kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 );
    cvZero( kalman->error_cov_post );

    kalman->gain = cvCreateMat( DP, MP, CV_32FC1 );

    if( CP > 0 )
    {
        kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 );
        cvZero( kalman->control_matrix );
    }

    kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 );
    kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 );
    kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 );
    kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 );
    kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 );

#if 1
    kalman->PosterState = kalman->state_pre->data.fl;
    kalman->PriorState = kalman->state_post->data.fl;
    kalman->DynamMatr = kalman->transition_matrix->data.fl;
    kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
    kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
    kalman->PNCovariance = kalman->process_noise_cov->data.fl;
    kalman->KalmGainMatr = kalman->gain->data.fl;
    kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
    kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
#endif

    return kalman;
}
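A short usage sketch of the structure created above, as a constant-velocity 2D tracker; all numeric values are illustrative:

// 4 state dims (x, y, vx, vy), 2 measurement dims (x, y), no control input.
CvKalman* kf = cvCreateKalman(4, 2, 0);
const float A[16] = { 1,0,1,0,  0,1,0,1,  0,0,1,0,  0,0,0,1 };
memcpy(kf->transition_matrix->data.fl, A, sizeof(A));
cvSetIdentity(kf->measurement_matrix, cvRealScalar(1));
cvSetIdentity(kf->process_noise_cov, cvRealScalar(1e-4));
cvSetIdentity(kf->measurement_noise_cov, cvRealScalar(1e-1));

const CvMat* prediction = cvKalmanPredict(kf, NULL);
printf("predicted x = %f\n", prediction->data.fl[0]);
float m[2] = { 12.3f, 45.6f };                // illustrative measurement
CvMat measurement = cvMat(2, 1, CV_32FC1, m);
cvKalmanCorrect(kf, &measurement);
cvReleaseKalman(&kf);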
Example no. 10
int track( IplImage* frame, int flag,int Cx,int Cy,int R )
{

    {

        int i, bin_w, c;

        LOGE("#######################Check1############################");

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
            LOGE("######################Check2###########################");
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );


        {
            int _vmin = vmin, _vmax = vmax;

            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );
            LOGE("###########################Check3######################");
            if(flag==0)
            {
            	LOGE("###############Initialized#############################");
				selection.x=Cx-R;
				selection.y=Cy-R;
				selection.height=2*R;
				selection.width=2*R;
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
                LOGE("##############Check4#########################");
            }
            LOGE("##############Check5#########################");
            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;
            char buffer[50];
            sprintf(buffer,"vals= %d %d and %d",track_window.x,track_window.y,track_window.width);
            LOGE(buffer);
            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            if( image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        LOGE("!!!!!!!!!!!!!!!!!!Done Tracking!!!!!!!!!!!!!!!!!!!!!!!!!!!!");


    }



    return 0;
}
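The function relies on globals (image, hsv, hue, hist, track_window, ...) that are allocated on the first call. A minimal calling pattern, with illustrative seed values:

track(frame, /*flag=*/0, /*Cx=*/160, /*Cy=*/120, /*R=*/30); // first call: build the hue histogram from the seed circle
track(frame, /*flag=*/1, 0, 0, 0);                          // later frames: CamShift tracking only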
Example no. 11
////////////////////////////////////////////////////////////////////////////////////
// Display the tensor information of each scale as a color image
////////////////////////////////////////////////////////////////////////////////////
void Tensor::ShowTensorByColorImage()
{
	double ret_minr=0.0;
	double ret_maxr=0.0;
	double ret_ming=0.0;
	double ret_maxg=0.0;
	double ret_minb=0.0;
	double ret_maxb=0.0;
	int x,y,i;
	// texture features
	IplImage **pImg= new IplImage *[m_levels];
	for (i = 0;i < m_levels;i++)
	{
		pImg[i] = cvCreateImage( cvGetSize(m_img), m_img->depth, 3);
		cvZero(pImg[i]);
	}

	CString * ptitle=new CString [m_levels];

	for (i=0;i<m_levels;i++)
	{
		// find the upper and lower bounds of each color channel of the image
		for (y=0; y<m_h;y++)
		{
			for (x=0;x<m_w;x++)
			{
				if((*m_pImageTensorRGB[i])(x,y).r>ret_maxr)
				{
					ret_maxr=(*m_pImageTensorRGB[i])(x,y).r;
				}
				if ((*m_pImageTensorRGB[i])(x,y).r<ret_minr)
				{
					ret_minr=(*m_pImageTensorRGB[i])(x,y).r;
				}

				if((*m_pImageTensorRGB[i])(x,y).g>ret_maxg)
				{
					ret_maxg=(*m_pImageTensorRGB[i])(x,y).g;
				}
				if ((*m_pImageTensorRGB[i])(x,y).g<ret_ming)
				{
					ret_ming=(*m_pImageTensorRGB[i])(x,y).g;
				}

				if((*m_pImageTensorRGB[i])(x,y).b>ret_maxb)
				{
					ret_maxb=(*m_pImageTensorRGB[i])(x,y).b;
				}
				if ((*m_pImageTensorRGB[i])(x,y).b<ret_minb)
				{
					ret_minb=(*m_pImageTensorRGB[i])(x,y).b;
				}

			}
		}
		uchar * dst=(uchar *)pImg[i]->imageData;
		for (y=0; y<m_h;y++)
		{
			for (x=0;x<m_w;x++)
			{
				int temp=y*(pImg[i]->widthStep)+3*x;
				// normalize each channel to [0,255]; scaling by 256 would overflow uchar at the channel maximum
				dst[temp+2]=(uchar)(((*m_pImageTensorRGB[i])(x,y).r-ret_minr)/(ret_maxr-ret_minr)*255);
				dst[temp+1]=(uchar)(((*m_pImageTensorRGB[i])(x,y).g-ret_ming)/(ret_maxg-ret_ming)*255);
				dst[temp+0]=(uchar)(((*m_pImageTensorRGB[i])(x,y).b-ret_minb)/(ret_maxb-ret_minb)*255);
			}
		}
		ptitle[i].Format(_T("Image Texture of Level %d"),i);
		cvNamedWindow((char *)(LPCTSTR)ptitle[i],CV_WINDOW_AUTOSIZE);
		cvShowImage((char *)(LPCTSTR)ptitle[i],pImg[i]);
	}
	if (pImg != NULL)
	{
		for (i=0;i<m_levels;i++)
		{
			cvReleaseImage(&pImg[i]);
		}
		delete [] pImg;
	}
}
Example no. 12
void PlateFinder::ImageRestoration(IplImage *src)
{
	int w = src->width;
	int h = src->height;

	IplImage *mImg = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 1);		// image used for the morphological operations
	IplImage *src_pyrdown = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
	IplImage *tmp = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
	IplImage *thresholed = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);	// binary image after thresholding
	IplImage *mini_thresh = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);
	IplImage *dst = cvCreateImage (cvSize(w/2, h/2), IPL_DEPTH_8U, 1);			// image emphasizing the plate region

	cvPyrDown (src, src_pyrdown);

	cvMorphologyEx(src_pyrdown, mImg, tmp, S2, CV_MOP_BLACKHAT);
	cvNormalize(mImg, mImg, 0, 255, CV_MINMAX);


	// Binarize the image mImg
	cvThreshold(mImg, thresholed, (int)10*cvAvg(mImg).val[0], 255, CV_THRESH_BINARY);
	cvZero(dst);
	cvCopy(thresholed, mini_thresh);

	// Slide a 32x16 window (four 16x8 quadrants) across the whole image
	
	int cnt;
	int nonZero1, nonZero2, nonZero3, nonZero4;
	CvRect rect;

	for (int i = 0; i < mini_thresh->width-32; i+=4)
	{
		for (int j = 0; j  < mini_thresh->height-16; j+=4)
		{
			rect = cvRect(i, j, 16, 8);
			cvSetImageROI (mini_thresh, rect);	//ROI = Region of Interest
			nonZero1 = cvCountNonZero(mini_thresh);
			cvResetImageROI(mini_thresh);

			rect = cvRect(i+16, j, 16, 8);
			cvSetImageROI (mini_thresh, rect);	//ROI = Region of Interest
			nonZero2 = cvCountNonZero(mini_thresh);
			cvResetImageROI(mini_thresh);

			rect = cvRect(i, j+8, 16, 8);
			cvSetImageROI (mini_thresh, rect);	//ROI = Region of Interest
			nonZero3 = cvCountNonZero(mini_thresh);
			cvResetImageROI(mini_thresh);

			rect = cvRect(i+16, j+8, 16, 8);
			cvSetImageROI (mini_thresh, rect);	//ROI = Region of Interest
			nonZero4 = cvCountNonZero(mini_thresh);
			cvResetImageROI(mini_thresh);

			cnt = 0;
			if (nonZero1 > 15) { cnt++; }
			if (nonZero2 > 15) { cnt++; }
			if (nonZero3 > 15) { cnt++; }
			if (nonZero4 > 15) { cnt++; }

			if (cnt > 2)
			{
				rect = cvRect (i, j, 32, 16);
				cvSetImageROI(dst, rect);
				cvSetImageROI(mini_thresh, rect);
				cvCopy(mini_thresh, dst);
				cvResetImageROI(dst);
				cvResetImageROI(mini_thresh);
			}
		}
	}

	IplImage* dst_clone = cvCloneImage(dst);

	cvDilate(dst, dst, NULL, 2);
	cvErode(dst, dst, NULL, 2);
	cvDilate(dst, dst, S1, 9);
	cvErode(dst, dst, S1, 10);
	cvDilate(dst, dst);

	/*cvShowImage("Source" , src);
	cvShowImage("mImg", mImg);	
	cvShowImage("mini_thresh", mini_thresh);	
	cvShowImage("dst_clone", dst_clone);	
	cvShowImage("dst", dst);*/

	cvPyrUp(dst, src);

	cvReleaseImage(&mini_thresh);
	cvReleaseImage(&mImg);
	cvReleaseImage(&tmp);
	cvReleaseImage(&dst);
	cvReleaseImage(&src_pyrdown);
	cvReleaseImage(&thresholed);
	cvReleaseImage(&dst_clone);
}
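S1 and S2 are IplConvKernel members of PlateFinder created elsewhere; a plausible initialization, purely as an assumption about their shapes, would be:

// Hypothetical setup: a wide rectangle for the black-hat step (S2) and a
// small horizontal bar for joining plate characters (S1).
IplConvKernel* S2 = cvCreateStructuringElementEx(9, 3, 4, 1, CV_SHAPE_RECT, NULL);
IplConvKernel* S1 = cvCreateStructuringElementEx(3, 1, 1, 0, CV_SHAPE_RECT, NULL);
// pass to cvMorphologyEx / cvDilate / cvErode as in the function above, then:
cvReleaseStructuringElement(&S1);
cvReleaseStructuringElement(&S2);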
Example no. 13
// display function should be good enough
void OpenRadar::DrawRadarData()
{
	int usualColor[15] = {16777215,255,128,65280,32768,
		      16711680,16711935,8421376,65535,32896 }; /*<usual color*/
	CvPoint pt1, pt2;

	cvZero(RadarImage);
	cvCircle(RadarImage, cvPoint(DisplayDx,DisplayDy),3, CV_RGB(0,255,255), -1, 8,0);
	int x,y;
	unsigned char * pPixel = 0;
	int colorIndex = 0, colorRGB;
	int R = 255, G = 0, B = 0;
    
	for (int i = 0; i < RadarDataCnt;i++)
	{  
		if (RadarRho[i] < 0)
		{
			
			//change color
			colorRGB = usualColor[colorIndex];
			R = colorRGB/65536;
			G = (colorRGB%65536)/256;
			B = colorRGB%256;
			colorIndex = (colorIndex + 1)%10;
			
		}
		else 
		{
			x = (int)(RadarRho[i]*cos(RadarTheta[i])/DisplayRatio) + DisplayDx;
			y = (int)(-RadarRho[i]*sin(RadarTheta[i])/DisplayRatio)+ DisplayDy;
	
			if (x >= 0 && x < RadarImageWdith && y >= 0 && y < RadarImageHeight)
			{
				pPixel = (unsigned char*)RadarImage->imageData + y*RadarImage->widthStep + 3*x;
				pPixel[0] = B;
				pPixel[1] = G;
				pPixel[2] = R;
			}
		}     
	}
	
	pt1.x = DisplayDx; pt1.y = DisplayDy;
	pt2.x = DisplayDx+line_length*v_scale*sin(v_angle + 0.5*M_PI); 
	pt2.y = DisplayDy+line_length*v_scale*cos(v_angle + 0.5*M_PI);
	cvLine(RadarImage, pt1, pt2, CV_RGB(255,255,255),2,8,0);

	pt2.x = DisplayDx+line_length*cos(-(-120 + skip_bin_idx * polarH_resolution)* M_PI/180 ); 	
	pt2.y = DisplayDy+line_length*sin(-(-120 + skip_bin_idx * polarH_resolution)* M_PI/180 ); 
	cvLine(RadarImage, pt1, pt2, CV_RGB(0,255,0),1,8,0);

	pt2.x = DisplayDx+line_length*cos(-(-120 + (polarH_length-skip_bin_idx) * polarH_resolution)* M_PI/180 ); 
	pt2.y = DisplayDy+line_length*sin(-(-120 + (polarH_length-skip_bin_idx) * polarH_resolution)* M_PI/180 ); 
	//pt2.x = DisplayDx+line_length*cos(0.25*M_PI); 
	//pt2.y = DisplayDy+line_length*sin(0.25*M_PI);
	//cout<< line_length <<endl; 
	//cout<< pt1.x <<" , " << pt1.y <<endl;
	//cout<< pt2.x <<" , " << pt2.y <<endl;
	cvLine(RadarImage, pt1, pt2, CV_RGB(0,255,0),1,8,0);

	float angle;
	int line_length2;
	for (int i=0; i<polarH_length;i++)
	{
		angle = (-30+i*polarH_resolution)*M_PI/180;
		line_length2 = H[i]/10;
		pt2.x = DisplayDx+line_length2*sin(angle); 
		pt2.y = DisplayDy+line_length2*cos(angle);
		cvCircle(RadarImage, pt2, 2, CV_RGB(255,255,255),1,8,0);
	}

	////////////////////////////////////////////////////////////////////////////////////
	// mine
	////////////////////////////////////////////////////////////////////////////////////
	Mat binImg = Mat::zeros(RadarImageHeight,RadarImageWdith,CV_8UC1);
	vector< Point> centerRaw;
	centerRaw.clear();
	for (int i = 0; i < RadarDataCnt;i++)
	{  
		if (RadarRho[i] > 200)
		{
			x = (int)(RadarRho[i]*cos(RadarTheta[i])/DisplayRatio) + DisplayDx;
			y = (int)(-RadarRho[i]*sin(RadarTheta[i])/DisplayRatio)+ DisplayDy;
			//centerRaw.push_back(Point(x,y));
			//cout<<"P:" <<centerRaw[i].x<<","<<centerRaw[i].y<<endl;
			if (x >= 0 && x < RadarImageWdith && y >= 0 && y < RadarImageHeight)
			{
				 circle( binImg,Point(x,y),1,Scalar(255),-1);
			}
		}     
	}
	imshow("binImg",binImg);
	Mat element = getStructuringElement(MORPH_RECT, Size(1,2));
	Mat element2 = getStructuringElement(MORPH_RECT, Size(10,10));
	erode(binImg, binImg, element);
	morphologyEx(binImg, binImg, MORPH_OPEN, element);
	dilate(binImg, binImg, element2);
	morphologyEx(binImg, binImg, MORPH_CLOSE, element2);
	imshow("dilate",binImg);

	vector< vector<Point> > contours;	
	vector< vector<Point> > filterContours;	
	vector< Vec4i > hierarchy;	
	vector< Point2f> center;
	vector< float > radius;
	vector<Point2f> realPoint;
	

	findContours(binImg, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
	center.resize(contours.size());
	radius.resize(contours.size());
	//realPoint.resize(contours.size());
	for(int i = 0; i< contours.size(); i++)
	{
		minEnclosingCircle(Mat(contours[i]),center[i],radius[i]); // fit the minimum enclosing circle of the contour
		circle(binImg,center[i],650/DisplayRatio,Scalar(255),1); 
		//cout<<"No."<<i<<" | P: "<< center[i].x<<","<<center[i].y<<endl;
		float realX = (center[i].x - DisplayDx) * DisplayRatio;
		float realY = (center[i].y - DisplayDy) * DisplayRatio;

		realPoint.push_back(Point2f(realX,realY));
		//cout<<"No."<<i<<" | P: "<< realPoint[i].x<<","<<realPoint[i].y<<endl;
	}
	imshow("findContours",binImg);
	// color map
	Mat mapImg = Mat::zeros(RadarImageHeight,RadarImageWdith,CV_8UC3);
	circle(mapImg, Point(DisplayDx,DisplayDy),3, CV_RGB(255,255,255),-1);
	line(mapImg, Point(DisplayDx,DisplayDy), Point(DisplayDx+40,DisplayDy), Scalar(0,0,255),1);
	line(mapImg, Point(DisplayDx,DisplayDy), Point(DisplayDx,DisplayDy+40), Scalar(0,255,0),1);
	for(int i = 0; i< center.size(); i++)
	{
		circle(mapImg,center[i],650/DisplayRatio,Scalar(255,255,0),1,CV_AA); 
		circle(mapImg,center[i],100/DisplayRatio,Scalar(0,255,255),-1); 
	}
	imshow("Map",mapImg);
	////////////////////////////////////
	ukftest::laserPoint msg;
	vector <float> xvec;
	vector <float> yvec;
	for(int i = 0 ; i < realPoint.size(); i++)
	{
		// cm
		xvec.push_back(realPoint[i].x/10.0f);
		yvec.push_back(realPoint[i].y/10.0f);
	}

	// msg
	msg.header.stamp = ros::Time::now();
	msg.header.frame_id = "hokuyo_laser";
	msg.x =xvec;
	msg.y =yvec;
	if(realPoint.size() >0) msg.isBlocking = 1;
	else msg.isBlocking = 0;
	pub_xy. publish(msg);
	
}
Example no. 14
/* Wrapper function for distance transform group */
CV_IMPL void
cvDistTransform( const void* srcarr, void* dstarr,
                 int distType, int maskSize,
                 const float *mask,
                 void* labelsarr )
{
    CvMat* temp = 0;
    CvMat* src_copy = 0;
    CvMemStorage* st = 0;
    
    CV_FUNCNAME( "cvDistTransform" );

    __BEGIN__;

    float _mask[5] = {0};
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvMat lstub, *labels = (CvMat*)labelsarr;
    CvSize size;
    //CvIPPDistTransFunc ipp_func = 0;
    //CvIPPDistTransFunc2 ipp_inp_func = 0;

    CV_CALL( src = cvGetMat( src, &srcstub ));
    CV_CALL( dst = cvGetMat( dst, &dststub ));

    if( !CV_IS_MASK_ARR( src ) || (CV_MAT_TYPE( dst->type ) != CV_32FC1 &&
        (CV_MAT_TYPE(dst->type) != CV_8UC1 || distType != CV_DIST_L1 || labels)) )
        CV_ERROR( CV_StsUnsupportedFormat,
        "source image must be 8uC1 and the distance map must be 32fC1 "
        "(or 8uC1 in case of simple L1 distance transform)" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "the source and the destination images must be of the same size" );

    if( maskSize != CV_DIST_MASK_3 && maskSize != CV_DIST_MASK_5 && maskSize != CV_DIST_MASK_PRECISE )
        CV_ERROR( CV_StsBadSize, "Mask size should be 3 or 5 or 0 (precise)" );

    if( distType == CV_DIST_C || distType == CV_DIST_L1 )
        maskSize = !labels ? CV_DIST_MASK_3 : CV_DIST_MASK_5;
    else if( distType == CV_DIST_L2 && labels )
        maskSize = CV_DIST_MASK_5;

    if( maskSize == CV_DIST_MASK_PRECISE )
    {
        CV_CALL( icvTrueDistTrans( src, dst ));
        EXIT;
    }
    
    if( labels )
    {
        CV_CALL( labels = cvGetMat( labels, &lstub ));
        if( CV_MAT_TYPE( labels->type ) != CV_32SC1 )
            CV_ERROR( CV_StsUnsupportedFormat, "the output array of labels must be 32sC1" );

        if( !CV_ARE_SIZES_EQ( labels, dst ))
            CV_ERROR( CV_StsUnmatchedSizes, "the array of labels has a different size" );

        if( maskSize == CV_DIST_MASK_3 )
            CV_ERROR( CV_StsNotImplemented,
            "3x3 mask can not be used for \"labeled\" distance transform. Use 5x5 mask" );
    }

    if( distType == CV_DIST_C || distType == CV_DIST_L1 || distType == CV_DIST_L2 )
    {
        icvGetDistanceTransformMask( (distType == CV_DIST_C ? 0 :
            distType == CV_DIST_L1 ? 1 : 2) + maskSize*10, _mask );
    }
    else if( distType == CV_DIST_USER )
    {
        if( !mask )
            CV_ERROR( CV_StsNullPtr, "" );

        memcpy( _mask, mask, (maskSize/2 + 1)*sizeof(float));
    }

    /*if( !labels )
    {
        if( CV_MAT_TYPE(dst->type) == CV_32FC1 )
            ipp_func = (CvIPPDistTransFunc)(maskSize == CV_DIST_MASK_3 ?
                icvDistanceTransform_3x3_8u32f_C1R_p : icvDistanceTransform_5x5_8u32f_C1R_p);
        else if( src->data.ptr != dst->data.ptr )
            ipp_func = (CvIPPDistTransFunc)icvDistanceTransform_3x3_8u_C1R_p;
        else
            ipp_inp_func = icvDistanceTransform_3x3_8u_C1IR_p;
    }*/

    size = cvGetMatSize(src);

    /*if( (ipp_func || ipp_inp_func) && src->cols >= 4 && src->rows >= 2 )
    {
        int _imask[3];
        _imask[0] = cvRound(_mask[0]);
        _imask[1] = cvRound(_mask[1]);
        _imask[2] = cvRound(_mask[2]);

        if( ipp_func )
        {
            IPPI_CALL( ipp_func( src->data.ptr, src->step,
                    dst->data.fl, dst->step, size,
                    CV_MAT_TYPE(dst->type) == CV_8UC1 ?
                    (void*)_imask : (void*)_mask ));
        }
        else
        {
            IPPI_CALL( ipp_inp_func( src->data.ptr, src->step, size, _imask ));
        }
    }
    else*/ if( CV_MAT_TYPE(dst->type) == CV_8UC1 )
    {
        CV_CALL( icvDistanceATS_L1_8u( src, dst ));
    }
    else
    {
        int border = maskSize == CV_DIST_MASK_3 ? 1 : 2;
        CV_CALL( temp = cvCreateMat( size.height + border*2, size.width + border*2, CV_32SC1 ));

        if( !labels )
        {
            CvDistTransFunc func = maskSize == CV_DIST_MASK_3 ?
                icvDistanceTransform_3x3_C1R :
                icvDistanceTransform_5x5_C1R;

            func( src->data.ptr, src->step, temp->data.i, temp->step,
                  dst->data.fl, dst->step, size, _mask );
        }
        else
        {
            CvSeq *contours = 0;
            CvPoint top_left = {0,0}, bottom_right = {size.width-1,size.height-1};
            int label;

            CV_CALL( st = cvCreateMemStorage() );
            CV_CALL( src_copy = cvCreateMat( size.height, size.width, src->type ));
            cvCmpS( src, 0, src_copy, CV_CMP_EQ );
            cvFindContours( src_copy, st, &contours, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
            cvZero( labels );
            for( label = 1; contours != 0; contours = contours->h_next, label++ )
            {
                CvScalar area_color = cvScalarAll(label);
                cvDrawContours( labels, contours, area_color, area_color, -255, -1, 8 );
            }

            cvCopy( src, src_copy );
            cvRectangle( src_copy, top_left, bottom_right, cvScalarAll(255), 1, 8 );

            icvDistanceTransformEx_5x5_C1R( src_copy->data.ptr, src_copy->step, temp->data.i, temp->step,
                        dst->data.fl, dst->step, labels->data.i, labels->step, size, _mask );
        }
    }

    __END__;

    cvReleaseMat( &temp );
    cvReleaseMat( &src_copy );
    cvReleaseMemStorage( &st );
}
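A minimal calling sketch for this wrapper, using the old C API (not part of the original; the input file name is a placeholder):

// Usage sketch: exact (precise) L2 distance transform of a binary mask.
IplImage* src  = cvLoadImage("mask.png", CV_LOAD_IMAGE_GRAYSCALE); // must be 8uC1
IplImage* dist = cvCreateImage(cvGetSize(src), IPL_DEPTH_32F, 1);  // 32fC1 output
cvThreshold(src, src, 128, 255, CV_THRESH_BINARY); // ensure a clean 0/255 mask
cvDistTransform(src, dist, CV_DIST_L2, CV_DIST_MASK_PRECISE, NULL, NULL);
cvReleaseImage(&src);
cvReleaseImage(&dist);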
Esempio n. 15
0
File: Blink.cpp Progetto: sreedal/GC
void Blink::Detect(IplImage* newFrame)
{
	CvMemStorage* storage=cvCreateMemStorage(0);
	CvSeq* contour;
	CvPoint start,end;
	if(prev==NULL && curr!=NULL){
		prev=cvCreateImage(cvSize(240,180),8,1);//cvGetSize(newFrame)
		cvCopy(curr,prev,NULL);
		cvCvtColor(newFrame,tmp,CV_BGR2GRAY);
		cvResize(tmp,curr,1);
	}
	if(curr==NULL){
		curr=cvCreateImage(cvSize(240,180),8,1);//cvGetSize(newFrame)
		tmp=cvCreateImage(cvGetSize(newFrame),8,1);
		cvCvtColor(newFrame,tmp,CV_BGR2GRAY);
		cvResize(tmp,curr,1);
		return;
	}
	if(prev && curr){
		cvCopy(curr,prev,NULL);
		cvCvtColor(newFrame,tmp,CV_BGR2GRAY);
		cvResize(tmp,curr,1);
	}
	//Find Connected Components in the difference image
	assert(curr && prev);
	IplImage* diff=cvCreateImage(cvGetSize(curr),8,1);
	cvZero(diff);
	if(leftEye && rightEye){
		start.x=leftEye->x-BORDER;
		start.y=leftEye->y-BORDER;
		end.x=rightEye->x+rightEye->width+BORDER;
		end.y=rightEye->y+rightEye->height+BORDER;
		cvSetImageROI(curr,cvRect(start.x,start.y,end.x-start.x,end.y-start.y));
		cvSetImageROI(prev,cvRect(start.x,start.y,end.x-start.x,end.y-start.y));
		cvSetImageROI(diff,cvRect(start.x,start.y,end.x-start.x,end.y-start.y));
	}
	cvSub(curr,prev,diff,NULL);
	cvThreshold(diff, diff, 5, 255, CV_THRESH_BINARY);
	cvMorphologyEx(diff, diff, NULL, kernel, CV_MOP_OPEN, 1);
	if(leftEye && rightEye){
		cvResetImageROI(curr);
		cvResetImageROI(prev);
		cvResetImageROI(diff);
	}
	cvShowImage(ONE,diff);
	cvWaitKey(1);
	int nc=cvFindContours(diff,storage,&contour,sizeof(CvContour),CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE,cvPoint(0,0));
	cvClearMemStorage(storage);
	cvReleaseImage(&diff);
	//Check if the components found are an eye pair
	if(nc!=2 || contour==0){
		//Detection failed; try tracking if an eye pair from a previous image exists
		return;
	}
	CvRect l,r;
	l=cvBoundingRect(contour,1);
	contour=contour->h_next;
	r=cvBoundingRect(contour,1);
	// Reject if the boxes differ too much in size or height, or if the horizontal
	// eye separation is not between 2 and 5 eye widths
	if(abs(l.width-r.width)>5 || abs(l.height-r.height)>5 || abs(l.y-r.y)>5 || (abs(l.x-r.x)/l.width)<2 || (abs(l.x-r.x)/l.width)>5 ){ 
		//Detection Failed
		return;
	}
	//Detected good -> set up variables for future use
	//(copy the rects: l and r are stack locals, so storing &l/&r would leave dangling pointers)
	if(!leftEye) leftEye=new CvRect;
	if(!rightEye) rightEye=new CvRect;
	*leftEye=l;
	*rightEye=r;
	if(!leftEyeTracker) leftEyeTracker=new TemplateTracker(1.5,0.1);
	if(!rightEyeTracker) rightEyeTracker=new TemplateTracker(1.5,0.1);
	leftEyeTracker->StartTracking(newFrame,leftEye);
	rightEyeTracker->StartTracking(newFrame,rightEye);
}
Esempio n. 16
0
IplImage* Panoramic::StitchFace(const char *leftName, const char *centerName,const char *rightName)
{
	IplImage   *leftHsvImg,*centerHsvImg,*rightHsvImg;
	vector<Coordinate> leftCoord;
	vector<Coordinate> rightCoord;
	vector<Coordinate> centerCoord;
	vector<Coordinate> profileCoord(3);
	vector<Coordinate> centerAffineCoord(3);
	IplImage   *leftAffineImg     = cvCreateImage(cvSize(m_width,m_height),8,1);
	IplImage   *rightAffineImg    = cvCreateImage(cvSize(m_width,m_height),8,1);
	IplImage   *leftFeatureImg    = cvLoadImage(leftName,1);
	IplImage   *centerFeatureImg  = cvLoadImage(centerName,1);
	IplImage   *rightFeatureImg   = cvLoadImage(rightName,1); 
	cvZero(rightAffineImg);
	cvZero(leftAffineImg);

	//Using red color threshold to find the features from input image
	leftHsvImg  = GetHsvFeature(leftFeatureImg   ,0,255,255,51,51,51);
	centerHsvImg= GetHsvFeature(centerFeatureImg ,0,255,255,51,51,51);
	rightHsvImg = GetHsvFeature(rightFeatureImg  ,0,255,255,51,51,51);
	//FindFeatureCoord will decide whether it continues or not.
    leftCoord   =  FindFeatureCoord(leftHsvImg);
	rightCoord  =  FindFeatureCoord(rightHsvImg);
	centerCoord =  FindFeatureCoord(centerHsvImg);
	
	if(m_do_sttich)//true when every image yields 12 feature coords; decided in FindFeatureCoord
	{
		RearrangeCoord(leftCoord);
		RearrangeCoord(rightCoord);
		RearrangeCoord(centerCoord);

		for(int i = 0; i < m_numFeature; i++) 
		{
			m_centerCood[i] = centerCoord[i];
		}
		if(m_debug)
		{
			ShowFeature(leftCoord);
			ShowFeature(centerCoord);
			ShowFeature(rightCoord);
		}
	
		Graphic FindLine;

		for(int numStitch = 0; numStitch < 2;numStitch++)
		{
			for(int num = 0;num < 3;num++)
			{
				if(numStitch == 1)
				{
					if(num==0)
					{
						profileCoord[0]         = leftCoord[1];
						centerAffineCoord[0]    = centerCoord[1];
					}
					else
					{
						profileCoord[num]       = leftCoord[num+2];
						centerAffineCoord[num]  = centerCoord[num+2];
					}
				}

				else
				{
					if(num==0)
					{
						profileCoord[0]			= rightCoord [7];
						centerAffineCoord[0]	= centerCoord[7];
					}
					else
					{
						profileCoord[num]       = rightCoord [num+8];
						centerAffineCoord[num]  = centerCoord[num+8];
					}
				}
			}
			//Stitch the left profile face
			if(numStitch == 1)
			{
				FindLine.Line(centerAffineCoord,0,centerAffineCoord,2,m_slope,m_intercept);
				DoAffineTrsform(m_leftImg,leftAffineImg,profileCoord,centerAffineCoord);
				if(m_debug)
				{
					cvNamedWindow("leftAffineImg",0);
					cvShowImage("leftAffineImg",leftAffineImg);
				}
				ShowStitch(leftAffineImg,m_centerImg); //profile-face stitching with linear blurring (feathering)
			}
			//Stitch the right profile face
			else
			{
				FindLine.Line(centerAffineCoord,0,centerAffineCoord,2,m_slope,m_intercept);
				DoAffineTrsform(m_rightImg,rightAffineImg,profileCoord,centerAffineCoord);
				if(m_debug)
				{
					cvNamedWindow("rightAffineImg",0);
					cvShowImage("rightAffineImg",rightAffineImg);
				}
				ShowStitch(rightAffineImg,m_centerImg);
			}
			m_saveSlope[numStitch]		=	m_slope;
			m_saveIntercept[numStitch]	=	m_intercept;
		}

		//Stitch the frontal face
		for(int j = 0;j < m_height;j++)
		{
			for(int i = 0;i < m_width;i++)
			{
				double linePostionright = m_saveSlope[0]*i + m_saveIntercept[0]-j;
				double linePostionleft  = m_saveSlope[1]*i + m_saveIntercept[1]-j;

				if(linePostionright > m_lineT && linePostionleft > m_lineT)
				{
					double pixel = cvGetReal2D(m_centerImg,j,i);
					cvSetReal2D(m_PanoramicFace,j,i,pixel) ;
				}
			}
		}
		if(m_debug)
		{
			cvNamedWindow("PanoramicFace",0);
			cvShowImage("PanoramicFace",m_PanoramicFace);
			cvWaitKey(0);
		}
		cvReleaseImage(&leftHsvImg);	cvReleaseImage(&centerHsvImg);		cvReleaseImage(&rightHsvImg);
		cvReleaseImage(&leftAffineImg);	cvReleaseImage(&rightAffineImg);
		cvReleaseImage(&leftFeatureImg);cvReleaseImage(&centerFeatureImg);	cvReleaseImage(&rightFeatureImg);
		return m_PanoramicFace;
	}
	else
	{
		printf("Error when stich image....");
		return NULL;
	}
	
}
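DoAffineTrsform is implemented elsewhere in this class. Assuming Coordinate exposes x and y fields, its core presumably resembles this sketch (a guess for illustration, not the original code):

// Hypothetical core of DoAffineTrsform(src, dst, profileCoord, centerAffineCoord):
// solve the 2x3 affine map sending the three profile features onto the
// corresponding center-face features, then warp the whole profile image.
CvPoint2D32f srcTri[3], dstTri[3];
for(int k = 0; k < 3; k++)
{
	srcTri[k] = cvPoint2D32f(profileCoord[k].x, profileCoord[k].y);
	dstTri[k] = cvPoint2D32f(centerAffineCoord[k].x, centerAffineCoord[k].y);
}
CvMat* warpMat = cvCreateMat(2, 3, CV_32FC1);
cvGetAffineTransform(srcTri, dstTri, warpMat);
cvWarpAffine(src, dst, warpMat, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
cvReleaseMat(&warpMat);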
Esempio n. 17
0
bool TrackingObject::startTracking(IplImage *src, CvRect t_rect, int corners, int min_d, float rate, IplImage *mask) {
    
    if(corners <= 0 || !src ) {
        return false;
    }
    clear();

	loseRate = rate;
    
    corner_count = corners;
    good_count = corners;
    
    cornersA = new CvPoint2D32f[corner_count];
    cornersB = new CvPoint2D32f[corner_count];
    
    //capture_rect = t_rect;
    
    obtainCaptureRect(t_rect, cvSize(src->width, src->height));
    
    CvSize image_size = cvSize(capture_rect.width,capture_rect.height);
    
    
    this->eig_image = cvCreateImage(image_size, IPL_DEPTH_32F, 1);
    this->temp_image = cvCreateImage(image_size, IPL_DEPTH_32F, 1);
    
    CvSize pyr_size = cvSize(image_size.width+8, image_size.height/3);
    
    this->pyrA = cvCreateImage(pyr_size, IPL_DEPTH_32F, 1);
    this->pyrB = cvCreateImage(pyr_size, IPL_DEPTH_32F, 1);
    
    this->imgA = cvCreateImage(image_size, IPL_DEPTH_8U, 1);
    this->imgB = cvCreateImage(image_size, IPL_DEPTH_8U, 1);
    
    
    IplImage *cmask = NULL;
    if(!mask) {
        
        cmask = cvCreateImage(image_size, IPL_DEPTH_8U, 1);
        cvZero(cmask);
        cvSetImageROI(cmask, cvRect(image_size.width/4, image_size.height/4, image_size.width/2, image_size.height/2));
        cvSet(cmask, cvScalar(255));
        cvResetImageROI(cmask);
    }
    else {
        cmask = cvCreateImage(cvGetSize(mask), IPL_DEPTH_8U, 1);
        cvCopy(mask, cmask);
    }
    
    cvSetImageROI(src, capture_rect);
    cvCopy(src, imgA);
    cvResetImageROI(src);
    
    
    cvGoodFeaturesToTrack(imgA, eig_image, temp_image, cornersA, &corner_count, 0.01, min_d, cmask);
    
    cvReleaseImage(&cmask);
    
    features_error = new float[corner_count];
    features_found = new char[corner_count];
    
    int g_x = 0;
    int g_y = 0;
    
    good_count = corner_count;
    
    
    for(int i=0; i<good_count; i++) {
        // Accumulate the centroid of the features just found; they live in
        // cornersA (cornersB is not filled in until the first tracking step).
        CvPoint pt = cvPoint(cvRound(cornersA[i].x),cvRound(cornersA[i].y));
        g_x += pt.x;
        g_y += pt.y;
    }
    
    
    
    return true;
    
}
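The buffers prepared here (imgA/imgB, pyrA/pyrB, cornersA/cornersB, features_found/features_error) are exactly what cvCalcOpticalFlowPyrLK consumes. The per-frame tracking step, presumably in a companion method of this class, would look roughly like:

// Sketch of the tracking step this setup prepares for (window size and
// iteration criteria are typical values, not taken from this class).
cvSetImageROI(src, capture_rect);
cvCopy(src, imgB);
cvResetImageROI(src);
cvCalcOpticalFlowPyrLK(imgA, imgB, pyrA, pyrB,
                       cornersA, cornersB, corner_count,
                       cvSize(10,10), 3,
                       features_found, features_error,
                       cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.3),
                       0);
// cornersB[i] now holds the new position of cornersA[i] where features_found[i] != 0.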
Esempio n. 18
0
// Runs the dot detector and sends detected dots to the server on the given port. TODO Implement headless mode. Needs more config options and/or possibly a config file first though
int run( const char *serverAddress, const int serverPort, char headless ) {
    char calibrate_exposure = 0, show = ~0, flip = 0, vflip = 0, done = 0, warp = 0; //"Boolean" values used in this loop
    char noiceReduction = 2; //Small counter, so char is still ok.
    int i, sockfd; //Generic counter and socket descriptor
    int dp = 0, minDist = 29, param1 = 0, param2 = 5; // Configuration variables for circle detection 
    int minDotRadius = 1;
    int detected_dots; //Detected dot counter
    int returnValue = EXIT_SUCCESS;
    int captureControl; //File descriptor for low-level camera controls
    int currentExposure = 150;
    int maxExposure = 1250; //Maximum exposure supported by the camera TODO Get this from the actual camera
    Color min = { 0, 70, 0, 0 }; //Minimum color to detect
    Color max = { 255, 255, 255, 0 }; //Maximum color to detect
    CvScalar colorWhite = cvScalar( WHITE ); //Color to draw detected dots on black and white surface
    BoundingBox DD_mask; //The box indicating what should and what should not be considered for dot search
    BoundingBox DD_transform; //The box indicating the plane we are looking at( and as such is the plane we would transform from )
    BoundingBox DD_transform_to; //The plane we are transforming to
    CvCapture *capture = NULL; //The camera
    CvMemStorage *storage; //Low level memory area used for dynamic structures in OpenCV
    CvSeq *seq; //Sequence to store detected dots in
    IplImage *grabbedImage = NULL; //Raw image from camera( plus some overlay in the end )
    IplImage *imgThreshold = NULL; //Image with detected dots
    IplImage *mask = NULL; //Mask to be able to remove uninteresting areas
    IplImage *coloredMask = NULL; //Mask to be able to indicate above mask on output image
    CvFont font; //Font for drawing text on images
    SendQueue *queue; //Head of the linked list that is the send queue
    char strbuf[255]; //Generic buffer for text formatting( with sprintf())
    struct timeval oldTime, time, diff; //Structs for measuring FPS
    float lastKnownFPS = 0; //Calculated FPS
    CvMat* pointRealMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* pointTransMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* transMat = cvCreateMat( 3,3,CV_32FC1 ); //Translation matrix for transforming input to a straight rectangle
    ClickParams clickParams = { TOP_LEFT, NULL, &DD_transform_to, transMat }; //Struct holding data needed by mouse-click callback function

    // Set up network
    sockfd = initNetwork( serverAddress, serverPort );
    if( sockfd == -1 ) {
        fprintf( stderr, "ERROR: initNetwork returned -1\n");
        return EXIT_FAILURE;
    }
    queue = initSendQueue();

    if( openCamera( &capture, &captureControl ) == 0 ) {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        return EXIT_FAILURE;
    }

    if( ( disableAutoExposure( captureControl ) ) == -1 ) {
        fprintf( stderr, "ERROR: Cannot disable auto exposure \n" );
        //return EXIT_FAILURE;
    }

    if( ( updateAbsoluteExposure( captureControl, currentExposure ) ) == 0 ) {
        fprintf( stderr, "ERROR: Cannot set exposure\n");
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( imagewindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the configuration sliders and the detection frame TODO This is kind of a hack. Make a better solution
    cvNamedWindow( configwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the transformed image. Handy to see how the dots are translated, but not needed for functionality
    if( warp ) cvNamedWindow( warpwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create sliders to adjust the lower color boundary
    cvCreateTrackbar( red_lable  , configwindowname, &min.red,   255, NULL );
    cvCreateTrackbar( green_lable, configwindowname, &min.green, 255, NULL );
    cvCreateTrackbar( blue_lable , configwindowname, &min.blue,  255, NULL );

    //Create sliders for the contour-based dot detection
    cvCreateTrackbar( min_area_lable, configwindowname, &minDotRadius,255, NULL );

    /* Slider for manual exposure setting */
    cvCreateTrackbar( exposure_lable, configwindowname, &currentExposure, maxExposure, NULL );

    //Create the memory storage
    storage = cvCreateMemStorage( 0 );

    // void cvInitFont( font, font_face, hscale, vscale, shear=0, thickness=1, line_type=8 )
    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8 );

    // Grab an initial image to be able to fetch image size before the main loop.
    grabbedImage = cvQueryFrame( capture );

    //Move the two windows so both are visible at the same time
    cvMoveWindow( imagewindowname, 0, 10 );
    cvMoveWindow( configwindowname, grabbedImage->width+2, 10 );

    //TODO Move these three inits to a function
    // Set masking defaults TODO load from file? Specify file for this file loading?
    DD_mask.topLeft.x = 0;  
    DD_mask.topLeft.y = 0;

    DD_mask.topRight.x = grabbedImage->width-1;
    DD_mask.topRight.y = 0;

    DD_mask.bottomLeft.x = 0;
    DD_mask.bottomLeft.y = grabbedImage->height-1;

    DD_mask.bottomRight.x = grabbedImage->width-1;
    DD_mask.bottomRight.y = grabbedImage->height-1;

    // Set transformation defaults TODO load from file? Specify file for this file loading?
    DD_transform.topLeft.x = 0;  
    DD_transform.topLeft.y = 0;

    DD_transform.topRight.x = grabbedImage->width-1;
    DD_transform.topRight.y = 0;

    DD_transform.bottomLeft.x = 0;
    DD_transform.bottomLeft.y = grabbedImage->height-1;

    DD_transform.bottomRight.x = grabbedImage->width-1;
    DD_transform.bottomRight.y = grabbedImage->height-1;

    // Set the transformation destination
    DD_transform_to.topLeft.x = 0;  
    DD_transform_to.topLeft.y = 0;

    DD_transform_to.topRight.x = grabbedImage->width-1;
    DD_transform_to.topRight.y = 0;

    DD_transform_to.bottomLeft.x = 0;
    DD_transform_to.bottomLeft.y = grabbedImage->height-1;

    DD_transform_to.bottomRight.x = grabbedImage->width-1;
    DD_transform_to.bottomRight.y = grabbedImage->height-1;

    calculateTransformationMatrix( &DD_transform, &DD_transform_to, transMat );

    // Set callback function for mouse clicks
    cvSetMouseCallback( imagewindowname, calibrateClick, ( void* ) &clickParams );

    gettimeofday( &oldTime, NULL );

    // Main loop. Grabs an image from the camera, detects dots, sends them, draws them onto the images, and shows the result to the user
    while( !done ) {

        //PROFILING_PRO_STAMP(); //Uncomment this and the one in the end of the while-loop, and comment all other PROFILING_* to profile main-loop

        // ------ Common actions
        cvClearMemStorage( storage );
        detected_dots = 0;

        //Grab a frame from the camera
        PROFILING_PRO_STAMP();
        grabbedImage = cvQueryFrame( capture );
        PROFILING_POST_STAMP( "cvQueryFrame");

        if( grabbedImage == NULL ) {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            returnValue = EXIT_FAILURE;
            break;
        }

        //Flip images to act as a mirror. 
        if( show && flip ) {
            cvFlip( grabbedImage, grabbedImage, 1 );
        }
        if( show && vflip ) {
            cvFlip( grabbedImage, grabbedImage, 0 );
        }

        // ------ State based actions
        switch( state ) {
            case GRAB_DOTS:

                //Create detection image
                imgThreshold = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvInRangeS( grabbedImage, cvScalar( DD_COLOR( min )), cvScalar( DD_COLOR( max )), imgThreshold );

                //Mask away anything not in our calibration area
                mask = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvZero( mask );
                cvFillConvexPoly( mask, ( CvPoint* ) &DD_mask, 4, cvScalar( WHITE ), 1, 0 );
                cvAnd( imgThreshold, mask, imgThreshold, NULL );

                // Invert mask, increase the number of channels in it and overlay on grabbedImage //TODO Tint the mask red before overlaying
                cvNot( mask, mask );
                coloredMask = cvCreateImage( cvGetSize( grabbedImage ), grabbedImage->depth, grabbedImage->nChannels );
                cvCvtColor( mask, coloredMask, CV_GRAY2BGR );
                cvAddWeighted( grabbedImage, 0.95, coloredMask, 0.05, 0.0, grabbedImage );


                // Reduce noise. 
                // Erode is kind of floor() of pixels, dilate is kind of ceil()
                // I'm not sure which gives the best result.
                switch( noiceReduction ) {
                    case 0: break; //No noise reduction at all
                    case 1: cvErode( imgThreshold, imgThreshold, NULL, 2 ); break;
                    case 2: cvDilate( imgThreshold, imgThreshold, NULL, 2 ); break;
                }

                // Warp the warp-image. We are reusing the coloredMask variable to save some space
                PROFILING_PRO_STAMP();
                if( show && warp ) cvWarpPerspective( grabbedImage, coloredMask, transMat, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ));
                PROFILING_POST_STAMP( "Warping perspective" );


                // Find all dots in the image
                PROFILING_PRO_STAMP();

                // Clear old data from seq
                seq = 0;

                // Find the dots
                cvFindContours(
                        imgThreshold,
                        storage,
                        &seq,
                        sizeof( CvContour ),
                        CV_RETR_LIST,
                        CV_CHAIN_APPROX_SIMPLE,
                        cvPoint( 0,0 )
                        );
                // cvFindContours destroys the original image, so we wipe it here
                // and then repaint the detected dots later
                cvZero( imgThreshold );

                PROFILING_POST_STAMP( "Dot detection" );

                //Process all detected dots
                PROFILING_PRO_STAMP();
                for( ; seq != 0; seq = seq->h_next ) {

                    // Calculate radius of the detected contour
                    CvRect rect =( ( CvContour * )seq )->rect;
                    float relCenterX = rect.width / 2;
                    float relCenterY = rect.height / 2;

                    // Make sure the dot is big enough
                    if( relCenterX < minDotRadius || relCenterY < minDotRadius ) {
                        continue;
                    }

                    // Note that we have found another dot
                    ++detected_dots;

                    // Transform the detected dot according to transformation matrix.
                    float absCenter[] = { rect.x + relCenterX, rect.y + relCenterY };
                    pointRealMat->data.fl = absCenter;
                    cvPerspectiveTransform( pointRealMat, pointTransMat, transMat );

                    // Draw the detected contour back to imgThreshold
                    // Draw the detected dot both to real image and to warped( if warp is active )
                    if( show ) {
                        cvDrawContours( imgThreshold, seq, colorWhite, colorWhite, -1, CV_FILLED, 8, cvPoint( 0,0 ) );
                        drawCircle( absCenter[0], absCenter[1], ( relCenterX + relCenterY ) / 2, grabbedImage );
                        if( warp ) {
                            drawCircle( pointTransMat->data.fl[0], pointTransMat->data.fl[1], ( relCenterX + relCenterY ) / 2, coloredMask );
                        }
                    }

                    // Add detected dot to to send queue
                    addPointToSendQueue( pointTransMat->data.fl, queue ); 
                }

                PROFILING_POST_STAMP("Painting dots");

                //Calculate framerate
                gettimeofday( &time, NULL );
                timeval_subtract( &diff, &time, &oldTime );
                lastKnownFPS = lastKnownFPS * 0.7 + ( 1000000.0 / diff.tv_usec ) * 0.3; //We naïvely assume we have more than 1 fps
                oldTime = time;

                //Send the dots detected this frame to the server
                PROFILING_PRO_STAMP();
                sendQueue( sockfd, queue );
                clearSendQueue( queue );
                PROFILING_POST_STAMP( "Sending dots" );

                /* If calibrating, do the calibration */
                if( calibrate_exposure ) {
                    int ret;
                    ret = calibrateExposureLow( captureControl, detected_dots, &currentExposure, DD_MAX_EXPOSURE, lastKnownFPS );
                    switch( ret ) {
                        case 0: // We are done. Let's leave calibration mode
                            calibrate_exposure = 0;
                            printf( "done\n" );
                            break;

                        case -1: // We hit the upper limit with no detected dots
                            fprintf( stderr, "Reached upper limit (%d). Aborting!\n", DD_MAX_EXPOSURE );
                            calibrate_exposure = 0;
                            break;

                        case -2: // We hit lower limit with more than one dot detected
                            fprintf( stderr, "Too bright. More than one dot found even with minimal exposure. Aborting!\n");
                            calibrate_exposure = 0;
                            break;

                        case -3: //No conclusive results.
                            fprintf( stderr, "No conclusive results. Giving up\n" );
                            calibrate_exposure = 0;
                            break;
                    }
                }

                break; //End of GRAB_DOTS

            case SELECT_TRANSFORM:
                //Falling through here. Poor man's multi-case clause. Not putting this in default as we might
                //want to do different things in these two some day.
            case SELECT_MASK:
                snprintf( strbuf, sizeof( strbuf ), "Select %s point", pointTranslationTable[clickParams.currentPoint]);
                cvDisplayOverlay( imagewindowname, strbuf, 5 );
                break; //End of SELECT_MASK and SELECT_TRANSFORM
        }

        // Paint the corners of the detecting area and the calibration area
        paintOverlayPoints( grabbedImage, &DD_transform );

        //Print some statistics to the image
        if( show ) {
            snprintf( strbuf, sizeof( strbuf ), "Dots: %i", detected_dots ); //Print number of detected dots to the screen
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 20 ), &font, cvScalar( WHITE ));
            snprintf( strbuf, sizeof( strbuf ), "FPS: %.1f", lastKnownFPS );
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 40 ), &font, cvScalar( WHITE ));
            cvCircle( grabbedImage, cvPoint( 15, 55 ), minDotRadius, cvScalar( min.blue, min.green, min.red, min.alpha ), -1, 8, 0 ); // Colors given in order BGR-A, Blue, Green, Red, Alpha
        }

        //Show images 
        PROFILING_PRO_STAMP();
        if( show ) {
            cvShowImage( configwindowname, imgThreshold );
            cvShowImage( imagewindowname, grabbedImage );
            if( warp ) cvShowImage( warpwindowname, coloredMask );
        }
        PROFILING_POST_STAMP("Showing images");

        //Release the temporary images
        cvReleaseImage( &imgThreshold );
        cvReleaseImage( &mask );
        cvReleaseImage( &coloredMask );

        /* Update exposure if needed */
        updateAbsoluteExposure( captureControl, currentExposure );
        cvSetTrackbarPos( exposure_lable, configwindowname, currentExposure );

        //If the ESC key is pressed the key code is 0x10001B under OpenCV 0.9.7 (Linux version),
        //so remove the higher bits using the AND operator
        i = ( cvWaitKey( 10 ) & 0xff );
        switch( i ) {
            case 'g': 
                makeCalibrate( &DD_transform, &DD_transform_to, transMat, capture, captureControl, 20 );
                updateAbsoluteExposure( captureControl, currentExposure+1 );
                break;

            case 'e': 
                toggleCalibrationMode( &calibrate_exposure, &currentExposure );
                break; /* Toggles calibration mode */

            case 'c':
                openCamera( &capture, &captureControl );
                break;

            case 's': 
                show = ~show;
                break; //Toggles updating of the image. Can be useful for performance of slower machines... Or as frame freeze

            case 'm': 
                state = SELECT_MASK;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_mask;
                break; //Starts selection of masking area. Will return to dot detection once all four points are set

            case 't':
                state = SELECT_TRANSFORM;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_transform;
                break; //Starts selection of the transformation area. Returns to dot detection when done.

            case 'f':
                flip = ~flip;
                break; //Toggles horizontal flipping of the image
            case 'v':
                vflip = ~vflip;
                break; //Toggles vertical flipping of the image

            case 'w':
                warp = ~warp;
                toggleWarpOutput( warp );
                break; //Toggles showing the warped image

            case 'n':
                noiceReduction = ( noiceReduction + 1 ) % 3;
                break; //Cycles the noise reduction algorithm

            case 'q': //falling through here to quit

            case  27: 
                done = 1;
                break; //ESC. Kills the whole thing( in a nice and controlled manner )
        }
        fflush( stdout ); //Make sure everything in the buffer is printed before we go on

        //PROFILING_POST_STAMP("Main loop");
    } //End of main while-loop

    // Release the capture device and do some housekeeping
    cvReleaseImage( &grabbedImage );
    cvReleaseCapture( &capture );
    cvReleaseMemStorage( &storage );
    cvDestroyWindow( imagewindowname );
    cvDestroyWindow( configwindowname );
    if( warp ) cvDestroyWindow( warpwindowname ); //If warp is off, the window has already been destroyed
    destroySendQueue( queue );
    close( sockfd );
    close( captureControl );
    return returnValue;
}
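timeval_subtract is not shown in this listing; the classic implementation from the glibc manual, which the FPS computation above is written against, is:

/* One common definition of timeval_subtract (adapted from the glibc manual);
   computes result = x - y and returns 1 if the difference is negative. */
int timeval_subtract( struct timeval *result, struct timeval *x, struct timeval *y ) {
    /* Carry so that y->tv_usec is in range for the subtraction */
    if( x->tv_usec < y->tv_usec ) {
        int nsec = ( y->tv_usec - x->tv_usec ) / 1000000 + 1;
        y->tv_usec -= 1000000 * nsec;
        y->tv_sec += nsec;
    }
    if( x->tv_usec - y->tv_usec > 1000000 ) {
        int nsec = ( x->tv_usec - y->tv_usec ) / 1000000;
        y->tv_usec += 1000000 * nsec;
        y->tv_sec -= nsec;
    }
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}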
Esempio n. 19
0
static int CV_CDECL
icvUpdateGaussianBGModel2( IplImage* curr_frame, CvGaussBGModel2*  bg_model )
{
    //checks
    if ((curr_frame->height!=bg_model->params.nHeight)||(curr_frame->width!=bg_model->params.nWidth)||(curr_frame->nChannels!=bg_model->params.nND))
        CV_Error( CV_StsBadSize, "the image not the same size as the reserved GMM background model");

    float alpha=bg_model->params.fAlphaT;
    bg_model->countFrames++;

    //faster initial updates - increase value of alpha
    if (bg_model->params.bInit){
        float alphaInit=(1.0f/(2*bg_model->countFrames+1));
        if (alphaInit>alpha)
        {
            alpha = alphaInit;
        }
        else
        {
            bg_model->params.bInit = 0;
        }
    }

    //update background
    //icvUpdatePixelBackgroundGMM2( curr_frame, bg_model->foreground, bg_model->data.rGMM,bg_model->data.rnUsedModes,&(bg_model->params),alpha);
    icvUpdatePixelBackgroundGMM2( curr_frame, bg_model->foreground, bg_model->data.rGMM,bg_model->data.rnUsedModes,
                                 bg_model->params.nM,
                                 bg_model->params.fTb,
                                 bg_model->params.fTB,
                                 bg_model->params.fTg,
                                 bg_model->params.fVarInit,
                                 bg_model->params.fVarMax,
                                 bg_model->params.fVarMin,
                                 bg_model->params.fCT,
                                 bg_model->params.fTau,
                                 bg_model->params.bShadowDetection,
                                 bg_model->params.nShadowDetection,
                                 alpha);

    //foreground filtering
    if (bg_model->params.bPostFiltering==1)
    {
        int region_count = 0;
        CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;


        //filter small regions
        cvClearMemStorage(bg_model->storage);

        cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_OPEN, 1 );
        cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_CLOSE, 1 );

        cvFindContours( bg_model->foreground, bg_model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
        for( seq = first_seq; seq; seq = seq->h_next )
        {
            CvContour* cnt = (CvContour*)seq;
            if( cnt->rect.width * cnt->rect.height < bg_model->params.minArea )
            {
                //delete small contour
                prev_seq = seq->h_prev;
                if( prev_seq )
                {
                    prev_seq->h_next = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = prev_seq;
                }
                else
                {
                    first_seq = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = NULL;
                }
            }
            else
            {
                region_count++;
            }
        }
        bg_model->foreground_regions = first_seq;
        cvZero(bg_model->foreground);
        cvDrawContours(bg_model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);

        return region_count;
    }

    return 1;
}
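A sketch of driving this updater through the generic CvBGStatModel interface follows. It assumes the companion constructor from the same cvaux module (cvCreateGaussianBGModel2); verify the exact name against your OpenCV version.

// Usage sketch via the generic CvBGStatModel interface. The constructor name
// cvCreateGaussianBGModel2 is assumed from the same cvaux module.
CvCapture* cap = cvCaptureFromCAM(0);
IplImage* frame = cvQueryFrame(cap);
CvBGStatModel* bg = cvCreateGaussianBGModel2(frame, NULL); // NULL = default params
cvNamedWindow("foreground", 1);
while((frame = cvQueryFrame(cap)) != NULL)
{
    cvUpdateBGStatModel(frame, bg); // dispatches to icvUpdateGaussianBGModel2
    cvShowImage("foreground", bg->foreground);
    if((cvWaitKey(10) & 0xff) == 27) break; // ESC quits
}
cvReleaseBGStatModel(&bg);
cvReleaseCapture(&cap);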
Esempio n. 20
0
void Window::refresh()
{
	cvZero(image);
	cvRectangle(image,cvPoint(0,0),cvPoint(width,heigth),CV_RGB(255,255,255),-1);
}
Esempio n. 21
0
int main(int argc, char* argv[])
{
	IplImage* color = cvLoadImage("E:\\pic_skindetect\\clothtest\\2.jpg", 1);
	IplImage* gray = cvCreateImage(cvGetSize(color), 8, 1);
	IplImage* show = cvCreateImage(cvGetSize(color), 8, 1);
	cvZero(show);
	int i = 0;

	cvCvtColor(color, gray, CV_BGR2GRAY); // cvLoadImage returns BGR data
	//cvThreshold(gray, gray, 100, 255, CV_THRESH_BINARY_INV);
	cvCanny(gray, gray, 50, 150, 3); 
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSeq* contours;
	CvSeq* seq_fourier = cvCreateSeq(CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),sizeof(CvPoint2D32f), storage);
	cvFindContours(gray, storage, &contours, sizeof(CvContour), CV_RETR_TREE);

	CvSeq* mostContours = contours;
	/*for(; contours; contours = contours->h_next)
	{
		if (mostContours->total < contours->total)
		{
			mostContours = contours;
		}
	}*/

	int t = 0;
	for(; contours; contours = contours->h_next)
	{
	//contours = mostContours;
		++t;
		printf("%d\n", contours->total);
		cvDrawContours(color, contours, CV_RGB(255,0,0), CV_RGB(255,0,0), 1, 3);
		CalcFourierDescriptorCoeff(contours, 2000, seq_fourier);
		CalcBoundary(seq_fourier, contours->total, contours);

		for(int i = 0; i < contours->total; i++)
		{
			CvPoint* pt=(CvPoint*)cvGetSeqElem(contours, i);
			if(pt->x >= 0 && pt->x < show->width && pt->y >= 0 && pt->y < show->height)
			{
				((uchar*)(show->imageData+pt->y*show->widthStep))[pt->x] = 255;
			}
		}

		/*for(i = 0; i < contours->total; i++)
		{
			CvPoint* pt=(CvPoint*)cvGetSeqElem(contours, i);
			printf("%d, %d, %d\n", pt->x, pt->y, i);
		}*/
/*
		for(i = 0; i < seq_fourier->total; i++)
		{
			CvPoint2D32f* pt=(CvPoint2D32f*)cvGetSeqElem(seq_fourier, i);
			printf("%f, %f, %d\n", pt->x, pt->y, i);
		}*/
	}
	printf("t=%d\n", t);

	cvNamedWindow("color", 0);
	cvShowImage("color",color);
	//cvWaitKey(0);

	cvNamedWindow("gray", 0);
	cvShowImage("gray", gray);
	//cvWaitKey(0);

	cvNamedWindow("reconstructed", 0);
	cvShowImage("reconstructed", show);
	cvWaitKey(0);
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&color);
	cvReleaseImage(&gray);
	cvReleaseImage(&show);
	cvDestroyAllWindows();
	return 0;
}
Esempio n. 22
0
void
cvInitUndistortRectifyMap( const CvMat* A, const CvMat* distCoeffs,
    const CvMat *R, const CvMat* Ar, CvArr* mapxarr, CvArr* mapyarr )
{
    CV_FUNCNAME( "cvInitUndistortMap" );

    __BEGIN__;
    
    double a[9], ar[9], r[9], ir[9], k[5]={0,0,0,0,0};
    int coi1 = 0, coi2 = 0;
    CvMat mapxstub, *_mapx = (CvMat*)mapxarr;
    CvMat mapystub, *_mapy = (CvMat*)mapyarr;
    CvMat _a = cvMat( 3, 3, CV_64F, a );
    CvMat _k = cvMat( 4, 1, CV_64F, k );
    CvMat _ar = cvMat( 3, 3, CV_64F, ar );
    CvMat _r = cvMat( 3, 3, CV_64F, r );
    CvMat _ir = cvMat( 3, 3, CV_64F, ir );
    int i, j;
    double fx, fy, u0, v0, k1, k2, k3, p1, p2;
    CvSize size;

    CV_CALL( _mapx = cvGetMat( _mapx, &mapxstub, &coi1 ));
    CV_CALL( _mapy = cvGetMat( _mapy, &mapystub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "The function does not support COI" );

    if( CV_MAT_TYPE(_mapx->type) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "Both maps must have 32fC1 type" );

    if( !CV_ARE_TYPES_EQ( _mapx, _mapy ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_ARE_SIZES_EQ( _mapx, _mapy ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( A )
    {
        if( !CV_IS_MAT(A) || A->rows != 3 || A->cols != 3  ||
            (CV_MAT_TYPE(A->type) != CV_32FC1 && CV_MAT_TYPE(A->type) != CV_64FC1) )
            CV_ERROR( CV_StsBadArg, "Intrinsic matrix must be a valid 3x3 floating-point matrix" );
        cvConvert( A, &_a );
    }
    else
        cvSetIdentity( &_a );

    if( Ar )
    {
        CvMat Ar33;
        if( !CV_IS_MAT(Ar) || Ar->rows != 3 || (Ar->cols != 3 && Ar->cols != 4) ||
            (CV_MAT_TYPE(Ar->type) != CV_32FC1 && CV_MAT_TYPE(Ar->type) != CV_64FC1) )
            CV_ERROR( CV_StsBadArg, "The new intrinsic matrix must be a valid 3x3 floating-point matrix" );
        cvGetCols( Ar, &Ar33, 0, 3 );
        cvConvert( &Ar33, &_ar );
    }
    else
        cvSetIdentity( &_ar );

    if( !CV_IS_MAT(R) || R->rows != 3 || R->cols != 3  ||
        (CV_MAT_TYPE(R->type) != CV_32FC1 && CV_MAT_TYPE(R->type) != CV_64FC1) )
        CV_ERROR( CV_StsBadArg, "Rotaion/homography matrix must be a valid 3x3 floating-point matrix" );

    if( distCoeffs )
    {
        CV_ASSERT( CV_IS_MAT(distCoeffs) &&
            (distCoeffs->rows == 1 || distCoeffs->cols == 1) &&
            (distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) == 4 ||
            distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) == 5) &&
            (CV_MAT_DEPTH(distCoeffs->type) == CV_64F ||
            CV_MAT_DEPTH(distCoeffs->type) == CV_32F) );
        _k = cvMat( distCoeffs->rows, distCoeffs->cols,
                CV_MAKETYPE(CV_64F, CV_MAT_CN(distCoeffs->type)), k );
        cvConvert( distCoeffs, &_k );
    }
    else
        cvZero( &_k );
    
    cvConvert( R, &_r );    // rectification matrix
    cvMatMul( &_ar, &_r, &_r ); // Ar*R
    cvInvert( &_r, &_ir );  // inverse: R^-1*Ar^-1

    u0 = a[2]; v0 = a[5];
    fx = a[0]; fy = a[4];
    k1 = k[0]; k2 = k[1]; k3 = k[4];
    p1 = k[2]; p2 = k[3];

    size = cvGetMatSize(_mapx);

    for( i = 0; i < size.height; i++ )
    {
        float* mapx = (float*)(_mapx->data.ptr + _mapx->step*i);
        float* mapy = (float*)(_mapy->data.ptr + _mapy->step*i);
        double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];

        for( j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] )
        {
            double w = 1./_w, x = _x*w, y = _y*w;
            double x2 = x*x, y2 = y*y;
            double r2 = x2 + y2, _2xy = 2*x*y;
            double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2;
            double u = fx*(x*kr + p1*_2xy + p2*(r2 + 2*x2)) + u0;
            double v = fy*(y*kr + p1*(r2 + 2*y2) + p2*_2xy) + v0; 
            mapx[j] = (float)u;
            mapy[j] = (float)v;
        }
    }

    __END__;
}
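A minimal calling sketch for the function above: build the maps once, then remap each frame (all camera parameters below are placeholders).

// Usage sketch: precompute undistort/rectify maps, then remap per frame.
double a[] = { 500, 0, 320,  0, 500, 240,  0, 0, 1 }; // placeholder intrinsics
double d[] = { -0.2, 0.05, 0, 0 };                    // placeholder distortion
CvMat A = cvMat( 3, 3, CV_64F, a );
CvMat D = cvMat( 4, 1, CV_64F, d );
CvMat* R = cvCreateMat( 3, 3, CV_64F );
cvSetIdentity( R ); // no rectifying rotation
IplImage* mapx = cvCreateImage( cvSize(640,480), IPL_DEPTH_32F, 1 );
IplImage* mapy = cvCreateImage( cvSize(640,480), IPL_DEPTH_32F, 1 );
cvInitUndistortRectifyMap( &A, &D, R, &A, mapx, mapy );
// per frame:
// cvRemap( src, dst, mapx, mapy, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );
cvReleaseMat( &R );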
Esempio n. 23
0
void THISCLASS::OnStep() {
	std::vector<Particle> rejectedparticles;

	// Get and check input image
	IplImage *inputimage = cvCloneImage(mCore->mDataStructureImageBinary.mImage);
	IplImage *outputImage = mCore->mDataStructureImageBinary.mImage;
	//mCore->mDataStructureImageBinary.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 1) {
		AddError(wxT("The input image is not a grayscale image."));
		return;
	}
	cvZero(outputImage);

	// We clear the output vector
	mParticles.clear();

	// Initialization
	Particle tmpParticle; // Used to put the calculated value in memory
	CvMoments moments; // Used to calculate the moments
	std::vector<Particle>::iterator j; // Iterator used to store the particles sorted by size

	// We allocate memory to extract the contours from the binary image
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* contour = 0;


	// Init blob extraction
	CvContourScanner blobs = cvStartFindContours(inputimage, storage, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

	// This is used to correct the position in case of ROI
	CvRect rectROI;
	if (inputimage->roi != NULL) {
		rectROI = cvGetImageROI(inputimage);
	} else {
		rectROI.x = 0;
		rectROI.y = 0;
	}

	while ((contour = cvFindNextContour(blobs)) != NULL) {
		// Computing the moments
		cvMoments(contour, &moments);

		// Computing particle area
		tmpParticle.mArea = moments.m00;
		tmpParticle.mCenter.x = (float)(rectROI.x + (moments.m10 / moments.m00 + 0.5));  // moments via Green's theorem
		tmpParticle.mCenter.y = (float)(rectROI.y + (moments.m01 / moments.m00 + 0.5));  // m10 = x direction, m01 = y direction, m00 = area, as given by the theorem

		// Selection based on area
		if ((mAreaSelection == false) || ((tmpParticle.mArea <= mMaxArea) && (tmpParticle.mArea >= mMinArea)))
		{
			tmpParticle.mCompactness = GetContourCompactness(contour);
			if ((mCompactnessSelection == false) || ((tmpParticle.mCompactness > mMinCompactness) && (tmpParticle.mCompactness < mMaxCompactness)))
			{
				double tempValue = cvGetCentralMoment(&moments, 2, 0) - cvGetCentralMoment(&moments, 0, 2);
				tmpParticle.mOrientation = atan(2 * cvGetCentralMoment(&moments, 1, 1) / (tempValue + sqrt(tempValue * tempValue + 4 * cvGetCentralMoment(&moments, 1, 1) * cvGetCentralMoment(&moments, 1, 1))));
				if ((mOrientationSelection == false) || (((tmpParticle.mOrientation > mMinOrientation) && (tmpParticle.mOrientation < mMaxOrientation)) || ((tmpParticle.mOrientation > mMinOrientation + PI) && (tmpParticle.mOrientation < mMaxOrientation + PI)) || ((tmpParticle.mOrientation > mMinOrientation - PI) && (tmpParticle.mOrientation < mMaxOrientation - PI))))
				{
					cvDrawContours(outputImage, contour, cvScalarAll(255), cvScalarAll(255), 0, CV_FILLED);
					// Check if we have already enough particles
					if (mParticles.size() == mMaxNumber)
					{
						// If the particle is bigger than the smallest stored particle, store it, else do nothing
						if (tmpParticle.mArea > mParticles.back().mArea)
						{
							// Find the place where it must be inserted, sorted by size
							for (j = mParticles.begin(); (j != mParticles.end()) && (tmpParticle.mArea < (*j).mArea); j++);

							// Fill unused values
							tmpParticle.mID = -1;
							tmpParticle.mIDCovariance = -1;

							// Insert the particle
							mParticles.insert(j, tmpParticle);
							// Remove the smallest one
							mParticles.pop_back();
						}
					}
					else
					{
						// The particle is added at the correct place
						// Find the place where it must be inserted, sorted by size
						for (j = mParticles.begin(); (j != mParticles.end()) && (tmpParticle.mArea < (*j).mArea); j++);

						// Fill unused values
						tmpParticle.mID = -1;
						tmpParticle.mIDCovariance = -1;

						// Insert the particle
						mParticles.insert(j, tmpParticle);
					}
				}
			}
		}
		else
		{
			rejectedparticles.push_back(tmpParticle);
		}
		cvRelease((void**)&contour);
	}
	contour = cvEndFindContours(&blobs);

	// If we need to display the particles
	/* if(trackingimg->GetDisplay())
	{
		for(j=rejectedparticles.begin();j!=rejectedparticles.end();j++)
		{
			trackingimg->DrawCircle(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(255,0,0));
		}
		for(j=particles.begin();j!=particles.end();j++)
		{
			trackingimg->DrawCircle(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(0,255,0));
			trackingimg->Cover(cvPoint((int)(((*j).p).x),(int)(((*j).p).y)),CV_RGB(255,0,0),2);
		}
	} */

	cvReleaseImage(&inputimage);
	cvRelease((void**)&contour);
	cvReleaseMemStorage(&storage);

	// Set these particles
	mCore->mDataStructureParticles.mParticles = &mParticles;

	// Let the DisplayImage know about our image
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetParticles(&mParticles);
		de.SetMainImage(mCore->mDataStructureImageBinary.mImage);
	}
}
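GetContourCompactness is not part of this listing; a plausible definition, using the usual 4*pi*area/perimeter^2 measure (1.0 for a perfect circle), is:

// Hypothetical helper: compactness = 4*pi*area / perimeter^2 (1.0 for a circle).
double GetContourCompactness(CvSeq* contour)
{
	double perimeter = cvArcLength(contour, CV_WHOLE_SEQ, 1); // treat as closed
	double area = fabs(cvContourArea(contour, CV_WHOLE_SEQ));
	return (perimeter > 0) ? 4 * CV_PI * area / (perimeter * perimeter) : 0;
}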
Esempio n. 24
0
int main()
{
	// Load the image we'll work on
	IplImage* img = cvLoadImage("C:\\goal_arena.jpg");
	CvSize imgSize = cvGetSize(img);

	// This will hold the white parts of the image
	IplImage* detected = cvCreateImage(imgSize, 8, 1);

	// These hold the three channels of the loaded image
	IplImage* imgBlue = cvCreateImage(imgSize, 8, 1);
	IplImage* imgGreen = cvCreateImage(imgSize, 8, 1);
	IplImage* imgRed = cvCreateImage(imgSize, 8, 1);
	cvSplit(img, imgBlue, imgGreen, imgRed, NULL);

	// Extract white parts into detected
	cvAnd(imgGreen, imgBlue, detected);
	cvAnd(detected, imgRed, detected);

	// Morphological opening
	cvErode(detected, detected);
	cvDilate(detected, detected);

	// Thresholding (I knew you wouldn't catch this one... so I wrote a comment here
	// I mean the command can be so deceiving at times)
	cvThreshold(detected, detected, 100, 250, CV_THRESH_BINARY);

	// Do the hough thingy
	CvMat* lines = cvCreateMat(100, 1, CV_32FC2);
	cvHoughLines2(detected, lines, CV_HOUGH_STANDARD, 1, 0.001, 100);
	
	// The two endpoints for each boundary line
	CvPoint left1 = cvPoint(0, 0);
	CvPoint left2 = cvPoint(0, 0);
	CvPoint right1 = cvPoint(0, 0);
	CvPoint right2 = cvPoint(0, 0);
	CvPoint top1 = cvPoint(0, 0);
	CvPoint top2 = cvPoint(0, 0);
	CvPoint bottom1 = cvPoint(0, 0);
	CvPoint bottom2 = cvPoint(0, 0);

	// Some numbers we're interested in
	int numLines = lines->rows;
	int numTop = 0;
	int numBottom = 0;
	int numLeft = 0;
	int numRight = 0;

	// Iterate through each line
	for(int i=0;i<numLines;i++)
	{
		// Get the parameters for the current line
		CvScalar dat = cvGet1D(lines, i);
		double rho = dat.val[0];
		double theta = dat.val[1];
		
		if(theta==0.0)
		{
			// This is an obviously vertical line... and we can't approximate it... NEXT
			continue;
		}

		// Convert from radians to degrees
		double degrees = theta*180/CV_PI;
		
		// Generate two points on this line (one at x=0 and one at x=image's width)
		CvPoint pt1 = cvPoint(0, rho/sin(theta));
		CvPoint pt2 = cvPoint(img->width, (-img->width/tan(theta)) + rho/sin(theta));
		
		if(abs(rho)<50)		// Top + left
		{
			if(degrees>45 && degrees<135)	// Top
			{
				numTop++;

				// The line is horizontal and near the top
				top1.x+=pt1.x;
				top1.y+=pt1.y;
			
				top2.x+=pt2.x;
				top2.y+=pt2.y;
			}
			else	// left
			{
				numLeft++;

				//The line is vertical and near the left
				left1.x+=pt1.x;
				left1.y+=pt1.y;
			
				left2.x+=pt2.x;
				left2.y+=pt2.y;
			}
		}
		else // bottom+right
		{
			if(degrees>45 && degrees<135)	// Bottom
			{
				numBottom++;

				//The line is horizontal and near the bottom
				bottom1.x+=pt1.x;
				bottom1.y+=pt1.y;
			
				bottom2.x+=pt2.x;
				bottom2.y+=pt2.y;
			}
			else	// Right
			{
				numRight++;

				// The line is vertical and near the right
				right1.x+=pt1.x;
				right1.y+=pt1.y;
				
				right2.x+=pt2.x;
				right2.y+=pt2.y;
			}
		}
	}

	// we've done the adding... now the dividing to get the "averaged" point
	left1.x/=numLeft;
	left1.y/=numLeft;
	left2.x/=numLeft;
	left2.y/=numLeft;

	right1.x/=numRight;
	right1.y/=numRight;
	right2.x/=numRight;
	right2.y/=numRight;

	top1.x/=numTop;
	top1.y/=numTop;
	top2.x/=numTop;
	top2.y/=numTop;

	bottom1.x/=numBottom;
	bottom1.y/=numBottom;
	bottom2.x/=numBottom;
	bottom2.y/=numBottom;

	// Render these lines onto the image
	cvLine(img, left1, left2, CV_RGB(255, 0,0), 1);
	cvLine(img, right1, right2, CV_RGB(255, 0,0), 1);
	cvLine(img, top1, top2, CV_RGB(255, 0,0), 1);
	cvLine(img, bottom1, bottom2, CV_RGB(255, 0,0), 1);

	// Next, we need to figure out the four intersection points
	double leftA = left2.y-left1.y;
	double leftB = left1.x-left2.x;
	double leftC = leftA*left1.x + leftB*left1.y;

	double rightA = right2.y-right1.y;
	double rightB = right1.x-right2.x;
	double rightC = rightA*right1.x + rightB*right1.y;

	double topA = top2.y-top1.y;
	double topB = top1.x-top2.x;
	double topC = topA*top1.x + topB*top1.y;

	double bottomA = bottom2.y-bottom1.y;
	double bottomB = bottom1.x-bottom2.x;
	double bottomC = bottomA*bottom1.x + bottomB*bottom1.y;

	// Intersection of left and top
	double detTopLeft = leftA*topB - leftB*topA;
	CvPoint ptTopLeft = cvPoint((topB*leftC - leftB*topC)/detTopLeft, (leftA*topC - topA*leftC)/detTopLeft);

	// Intersection of top and right
	double detTopRight = rightA*topB - rightB*topA;
	CvPoint ptTopRight = cvPoint((topB*rightC-rightB*topC)/detTopRight, (rightA*topC-topA*rightC)/detTopRight);

	// Intersection of right and bottom
	double detBottomRight = rightA*bottomB - rightB*bottomA;
	CvPoint ptBottomRight = cvPoint((bottomB*rightC-rightB*bottomC)/detBottomRight, (rightA*bottomC-bottomA*rightC)/detBottomRight);

	// Intersection of bottom and left
	double detBottomLeft = leftA*bottomB-leftB*bottomA;
	CvPoint ptBottomLeft = cvPoint((bottomB*leftC-leftB*bottomC)/detBottomLeft, (leftA*bottomC-bottomA*leftC)/detBottomLeft);

	// Render the points onto the image
	cvLine(img, ptTopLeft, ptTopLeft, CV_RGB(0,255,0), 5);
	cvLine(img, ptTopRight, ptTopRight, CV_RGB(0,255,0), 5);
	cvLine(img, ptBottomRight, ptBottomRight, CV_RGB(0,255,0), 5);
	cvLine(img, ptBottomLeft, ptBottomLeft, CV_RGB(0,255,0), 5);

	// Initialize a mask
	IplImage* imgMask = cvCreateImage(imgSize, 8, 3);
	cvZero(imgMask);

	// Generate the mask
	CvPoint* pts = new CvPoint[4];
	pts[0] = ptTopLeft;
	pts[1] = ptTopRight;
	pts[2] = ptBottomRight;
	pts[3] = ptBottomLeft;
	cvFillConvexPoly(imgMask, pts, 4, cvScalar(255,255,255));

	// Delete anything thats outside the mask
	cvAnd(img, imgMask, img);

	// Show all images in windows
	cvNamedWindow("Original");
	cvNamedWindow("Detected");

	cvShowImage("Original", img);
	cvShowImage("Detected", detected);

	cvWaitKey(0);

	return 0;
}
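Each intersection above writes both lines as A*x + B*y = C and applies Cramer's rule. The repeated arithmetic could be factored into a small helper like this (hypothetical, not in the original):

// Hypothetical helper consolidating the Cramer's-rule intersection used above;
// each line is A*x + B*y = C, built from two points on it.
static CvPoint intersect(CvPoint p1, CvPoint p2, CvPoint q1, CvPoint q2)
{
	double A1 = p2.y - p1.y, B1 = p1.x - p2.x, C1 = A1*p1.x + B1*p1.y;
	double A2 = q2.y - q1.y, B2 = q1.x - q2.x, C2 = A2*q1.x + B2*q1.y;
	double det = A1*B2 - B1*A2; // zero if the lines are parallel
	return cvPoint((int)((B2*C1 - B1*C2)/det), (int)((A1*C2 - A2*C1)/det));
}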
Esempio n. 25
0
void VarFlow::gauss_seidel_recursive(int current_level, int max_level, int first_level, float h, 
										IplImage** J13_array, IplImage** J23_array){
                                
    if(current_level == max_level){
         
        // Iterate normally n1 times and that's it
        gauss_seidel_iteration(current_level, h,  n1, J13_array, J23_array);
                     
    }
    
    else{
        
        //---------------------------- Start 1st V cycle -------------------------------------
     
        // Iterate n1 times
        gauss_seidel_iteration(current_level, h, n1, J13_array, J23_array); 
        
        // Calculate residual
        calculate_residual(current_level, h, J13_array, J23_array);
                               
        // Apply restriction operator to residual
        cvResize(imgU_res_err_array[current_level], imgU_res_err_array[current_level+1], CV_INTER_LINEAR);
        cvResize(imgV_res_err_array[current_level], imgV_res_err_array[current_level+1], CV_INTER_LINEAR);
        
        // Initialize new u and v images to zero
        cvZero(imgU_array[current_level+1]);
        cvZero(imgV_array[current_level+1]);
        
        // Pass residual down recursively (Important: switch J13 and J23 with imgU_res_err and imgV_res_err, increase h!!)  
        gauss_seidel_recursive(current_level+1, max_level, first_level, 2*h, imgU_res_err_array, imgV_res_err_array);
                 
        // Prolong solution to get error at current level                            
        cvResize(imgU_array[current_level+1], imgU_res_err_array[current_level], CV_INTER_LINEAR);
        cvResize(imgV_array[current_level+1], imgV_res_err_array[current_level], CV_INTER_LINEAR);
        
        // Correct original solution with error
        cvAdd(imgU_array[current_level], imgU_res_err_array[current_level], imgU_array[current_level], NULL);
        cvAdd(imgV_array[current_level], imgV_res_err_array[current_level], imgV_array[current_level], NULL);
        
        // Iterate n1+n2 times to smooth new solution
        gauss_seidel_iteration(current_level, h, n1+n2, J13_array, J23_array); 
        
                               
       //---------------------------- End 1st V cycle, Start 2nd V cycle -------------------------------------                        
                          
        // Calculate residual again
        calculate_residual(current_level,h, J13_array, J23_array);
                               
        // Apply restriction operator to residual
        cvResize(imgU_res_err_array[current_level], imgU_res_err_array[current_level+1], CV_INTER_LINEAR);
        cvResize(imgV_res_err_array[current_level], imgV_res_err_array[current_level+1], CV_INTER_LINEAR);
        
        // Initialize new u and v images to zero
        cvZero(imgU_array[current_level+1]);
        cvZero(imgV_array[current_level+1]);
        
        
        // Pass residual down recursively (Important: switch J13 and J23 with imgU_res_err and imgV_res_err, increase h!!)      
        gauss_seidel_recursive(current_level+1, max_level, first_level, 2*h, imgU_res_err_array, imgV_res_err_array);
               
        // Prolong solution to get error at current level                            
        cvResize(imgU_array[current_level+1], imgU_res_err_array[current_level], CV_INTER_LINEAR);
        cvResize(imgV_array[current_level+1], imgV_res_err_array[current_level], CV_INTER_LINEAR);
        
        // Correct original solution with error
        cvAdd(imgU_array[current_level], imgU_res_err_array[current_level], imgU_array[current_level], NULL);
        cvAdd(imgV_array[current_level], imgV_res_err_array[current_level], imgV_array[current_level], NULL);
        
        // Iterate n2 times to smooth new solution
        gauss_seidel_iteration(current_level, h, n2, J13_array, J23_array); 
        
                               
        //---------------------------- End 2nd V cycle -------------------------------------
              
    }
                                
}
Esempio n. 26
0
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    printf("Nonzero count %d\n", cvCountNonZero(mask));

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components;
    // one extra iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                                      cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
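
update_mhi() matches the driver of OpenCV's classic motempl demo; below is a minimal sketch of such a capture loop. The camera index, window name, and the diff_threshold value of 30 are illustrative choices, not taken from this source.

int main( int argc, char** argv )
{
    IplImage* motion = 0;
    CvCapture* capture = cvCaptureFromCAM(0); // default camera (illustrative)
    if( !capture )
        return -1;

    cvNamedWindow( "Motion", 1 );
    for(;;)
    {
        IplImage* image = cvQueryFrame( capture );
        if( !image )
            break;

        if( !motion )
        {
            motion = cvCreateImage( cvSize(image->width, image->height), IPL_DEPTH_8U, 3 );
            cvZero( motion );
            motion->origin = image->origin; // keep the same row origin as the input frame
        }

        update_mhi( image, motion, 30 ); // 30 = illustrative frame-difference threshold
        cvShowImage( "Motion", motion );

        if( cvWaitKey(10) >= 0 )
            break;
    }
    cvReleaseCapture( &capture );
    cvDestroyWindow( "Motion" );
    return 0;
}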
Esempio n. 27
0
// perform autocalibration using absolute quadric
bool mvg_autocalibration(CvMat ** Ps, double * principal_points, const size_t n, CvMat ** Xs, const size_t m)
{
	if (n < 3)
	{
		return false;
	}

	printf("*****************************************\n");
	opencv_debug("First camera before transformation", Ps[0]);

	// move the principal point to the origin for every camera
	// and use canonical first camera
	CvMat * T = opencv_create_matrix(3, 3);
	CvMat * S = opencv_create_matrix(3, 3);
	CvMat * G = opencv_create_matrix(4, 4);	
	CvMat * H = opencv_create_matrix(4, 4);
	for (size_t i = 0; i < n; i++) 
	{
		// set up translation matrix 
		cvZero(T);
		OPENCV_ELEM(T, 0, 0) = 1;
		OPENCV_ELEM(T, 1, 1) = 1;
		OPENCV_ELEM(T, 2, 2) = 1;
		OPENCV_ELEM(T, 0, 2) = -principal_points[2 * i + 0]; 
		OPENCV_ELEM(T, 1, 2) = -principal_points[2 * i + 1]; 

		// apply it to the projection matrix
		cvMatMul(T, Ps[i], Ps[i]);

		// also scale 
		cvZero(S);
		OPENCV_ELEM(S, 0, 0) = 0.001; 
		OPENCV_ELEM(S, 1, 1) = 0.001; 
		OPENCV_ELEM(S, 2, 2) = 1; 
		cvMatMul(S, Ps[i], Ps[i]);

		// calculate the world-space homography which transforms P_1 to [I_3x3 | 0]
		if (i == 0) 
		{
			cvZero(G);
			OPENCV_ELEM(G, 3, 3) = 1;
			for (int r = 0; r < 3; r++) // renamed from 'i': it shadowed the outer loop variable
			{
				for (int j = 0; j < 4; j++) 
				{
					OPENCV_ELEM(G, r, j) = OPENCV_ELEM(Ps[0], r, j);
				}
			}
			cvInvert(G, H, CV_SVD);
		}

		// apply the homography to every camera
		cvMatMul(Ps[i], H, Ps[i]);
	}

	// also apply inverse homography to all the points
	for (size_t i = 0; i < m; i++)
	{
		cvMatMul(G, Xs[i], Xs[i]);
	}

	// debug
	opencv_debug("First camera", Ps[0]);
	opencv_debug("Transformed using this transformation", H);

	printf("*****************************************\n");
	printf("List of all cameras:\n");
	for (size_t i = 0; i < n; i++) 
	{
		opencv_debug("Camera", Ps[i]);
	}

	cvReleaseMat(&S);
	cvReleaseMat(&T); 
	cvReleaseMat(&H);
	cvReleaseMat(&G);
	
	// construct system of linear equations 
	CvMat * W = opencv_create_matrix(4 * (n - 1), 5), * b = opencv_create_matrix(4 * (n - 1), 1);
	for (size_t i = 1; i < n; i++) 
	{
		const double
			p11 = OPENCV_ELEM(Ps[i], 0, 0),
			p12 = OPENCV_ELEM(Ps[i], 0, 1),
			p13 = OPENCV_ELEM(Ps[i], 0, 2),
			p14 = OPENCV_ELEM(Ps[i], 0, 3),

			p21 = OPENCV_ELEM(Ps[i], 1, 0),
			p22 = OPENCV_ELEM(Ps[i], 1, 1),
			p23 = OPENCV_ELEM(Ps[i], 1, 2),
			p24 = OPENCV_ELEM(Ps[i], 1, 3),

			p31 = OPENCV_ELEM(Ps[i], 2, 0),
			p32 = OPENCV_ELEM(Ps[i], 2, 1),
			p33 = OPENCV_ELEM(Ps[i], 2, 2),
			p34 = OPENCV_ELEM(Ps[i], 2, 3)
		;

		const double
			p11_2 = sq(p11),
			p12_2 = sq(p12),
			p21_2 = sq(p21), 
			p22_2 = sq(p22), 
			p14_2 = sq(p14),
			p24_2 = sq(p24),
			p13_2 = sq(p13),
			p23_2 = sq(p23)
		;

		OPENCV_ELEM(W, (i - 1) * 4 + 0, 0) = p11_2 + p12_2 - p21_2 - p22_2;
		OPENCV_ELEM(W, (i - 1) * 4 + 0, 1) = 2 * p11 * p14 - 2 * p21 * p24;
		OPENCV_ELEM(W, (i - 1) * 4 + 0, 2) = 2 * p12 * p14 - 2 * p22 * p24;
		OPENCV_ELEM(W, (i - 1) * 4 + 0, 3) = 2 * p13 * p14 - 2 * p23 * p24;
		OPENCV_ELEM(W, (i - 1) * 4 + 0, 4) = p14_2 - p24_2;
		OPENCV_ELEM(b, (i - 1) * 4 + 0, 0) = -(p13_2 - p23_2);

		OPENCV_ELEM(W, (i - 1) * 4 + 1, 0) = p11 * p21 + p12 * p22;
		OPENCV_ELEM(W, (i - 1) * 4 + 1, 1) = p14 * p21 + p11 * p24;
		OPENCV_ELEM(W, (i - 1) * 4 + 1, 2) = p14 * p22 + p12 * p24;
		OPENCV_ELEM(W, (i - 1) * 4 + 1, 3) = p14 * p23 + p13 * p24;
		OPENCV_ELEM(W, (i - 1) * 4 + 1, 4) = p14 * p24;
		OPENCV_ELEM(b, (i - 1) * 4 + 1, 0) = -(p13 * p23);

		OPENCV_ELEM(W, (i - 1) * 4 + 2, 0) = p11 * p31 + p12 * p32;
		OPENCV_ELEM(W, (i - 1) * 4 + 2, 1) = p14 * p31 + p11 * p34;
		OPENCV_ELEM(W, (i - 1) * 4 + 2, 2) = p14 * p32 + p12 * p34;
		OPENCV_ELEM(W, (i - 1) * 4 + 2, 3) = p14 * p33 + p13 * p34;
		OPENCV_ELEM(W, (i - 1) * 4 + 2, 4) = p14 * p34;
		OPENCV_ELEM(b, (i - 1) * 4 + 2, 0) = -(p13 * p33);

		OPENCV_ELEM(W, (i - 1) * 4 + 3, 0) = p21 * p31 + p22 * p32;
		OPENCV_ELEM(W, (i - 1) * 4 + 3, 1) = p24 * p31 + p21 * p34;
		OPENCV_ELEM(W, (i - 1) * 4 + 3, 2) = p24 * p32 + p22 * p34;
		OPENCV_ELEM(W, (i - 1) * 4 + 3, 3) = p24 * p33 + p23 * p34;
		OPENCV_ELEM(W, (i - 1) * 4 + 3, 4) = p24 * p34;
		OPENCV_ELEM(b, (i - 1) * 4 + 3, 0) = -(p23 * p33);
	}

	opencv_debug("Autocalibrating equations", W);
	
	CvMat * solution = opencv_create_matrix(5, 1);
	cvSolve(W, b, solution, CV_SVD);

	// if first row in solution is not positive, replace W by -W
	/*if (OPENCV_ELEM(solution, 0, 0) <= 0) 
	{
		printf("--- !!! --- Multiplying the matrix by -1 --- !!! ---\n");
		for (int i = 0; i < W->rows; i++) 
		{
			for (int j = 0; j < W->cols; j++) 
			{
				OPENCV_ELEM(W, i, j) *= -1;
			}
		}

		cvSolve(W, b, solution, CV_SVD);
	}*/

	opencv_debug("Solution", solution);
	cvReleaseMat(&W); 
	cvReleaseMat(&b);

	// compute f_1, K_1 and w (the focal length of the first camera, calibration matrix 
	// of the first camera and the plane at infinity)
	double f_1 = OPENCV_ELEM(solution, 0, 0);
	if (f_1 < 0) 
	{
		printf("--- Multiplying f^2 by -1 ---\n");
		f_1 *= -1;
	}

	f_1 = sqrt(f_1);
	// f_1 *= 1000.0;
	printf("f_1 = %f\n\n", f_1);
	CvMat * K_1 = opencv_create_I_matrix(3); 
	OPENCV_ELEM(K_1, 0, 0) = f_1; 
	OPENCV_ELEM(K_1, 1, 1) = f_1;
	CvMat * w = opencv_create_matrix(3, 1); 
	OPENCV_ELEM(w, 0, 0) = OPENCV_ELEM(solution, 1, 0) / f_1;
	OPENCV_ELEM(w, 1, 0) = OPENCV_ELEM(solution, 2, 0) / f_1;
	OPENCV_ELEM(w, 2, 0) = OPENCV_ELEM(solution, 3, 0);

	// check with the last value of the solution vector, which contains 
	// the scalar product w_transposed * w
	const double w_Tw = sq(OPENCV_ELEM(w, 0, 0)) + sq(OPENCV_ELEM(w, 1, 0)) + sq(OPENCV_ELEM(w, 2, 0));

	// debug 
	opencv_debug("K_1", K_1);
	opencv_debug("w", w); 
	printf("difference between calculated and recomputed w_T * w = %f - %f = %f\n", OPENCV_ELEM(solution, 4, 0), w_Tw, OPENCV_ELEM(solution, 4, 0) - w_Tw);

	// construct the rectifying homography
	CvMat * H_metric = opencv_create_matrix(4, 4);
	cvZero(H_metric);
	for (int i = 0; i < 3; i++) 
	{
		for (int j = 0; j < 3; j++) 
		{
			OPENCV_ELEM(H_metric, i, j) = OPENCV_ELEM(K_1, i, j); 
		}
	}

	CvMat * H_metric_4_prime = opencv_create_matrix(1, 3);
	CvMat * w_T = opencv_create_matrix(1, 3); // cvTranspose cannot transpose a non-square matrix in place
	cvTranspose(w, w_T);
	cvMatMul(w_T, K_1, H_metric_4_prime);
	cvReleaseMat(&w_T);

	for (int j = 0; j < 3; j++)
	{
		OPENCV_ELEM(H_metric, 3, j) = OPENCV_ELEM(H_metric_4_prime, 0, j);
	}

	OPENCV_ELEM(H_metric, 3, 3) = 1;
	cvReleaseMat(&H_metric_4_prime);
	cvInvert(H_metric, H_metric);

	// apply rectifying homography to all points
	for (size_t i = 0; i < m; i++) 
	{
		cvMatMul(H_metric, Xs[i], Xs[i]);
	}

	// release resources 
	cvReleaseMat(&H_metric);
	cvReleaseMat(&K_1); 
	cvReleaseMat(&w);
	cvReleaseMat(&solution);

	return true;
}
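
For reference, the four equations per camera assembled into W and b above are the standard absolute dual quadric constraints. A sketch of the underlying algebra, under the assumptions the code already enforces (principal point at the origin, zero skew, unit aspect ratio):

\[
\omega_i^* \;\propto\; P_i \, \Omega^* \, P_i^{\mathsf{T}} \;\propto\; K_i K_i^{\mathsf{T}} = \operatorname{diag}(f_i^2,\, f_i^2,\, 1)
\]

so that \((\omega_i^*)_{11} = (\omega_i^*)_{22}\) and \((\omega_i^*)_{12} = (\omega_i^*)_{13} = (\omega_i^*)_{23} = 0\): one row of W and b per constraint for each camera i >= 2, in the five unknowns parametrizing \(\Omega^*\).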
Esempio n. 28
0
//---------------------------------------------------------------
// Function : cv_ColorExtraction
// Summary  : color extraction
// Arguments: src_img   = input image (8-bit, 3 channels)
//          : dst_img   = output image (8-bit, 3 channels)
//          : code      = color-space conversion code (CV_BGR2HSV, CV_BGR2Lab, etc.)
//          : ch1_lower = channel 1 threshold (lower)
//          : ch1_upper = channel 1 threshold (upper)
//          : ch2_lower = channel 2 threshold (lower)
//          : ch2_upper = channel 2 threshold (upper)
//          : ch3_lower = channel 3 threshold (lower)
//          : ch3_upper = channel 3 threshold (upper)
// Returns  : none
// Notes    : if lower <= upper, values in [lower, upper] are extracted;
//          : if lower >  upper, values <= upper or >= lower are extracted.
//---------------------------------------------------------------
void cv_ColorExtraction(IplImage* src_img, IplImage* dst_img,
                        int code,
                        int ch1_lower, int ch1_upper,
                        int ch2_lower, int ch2_upper,
                        int ch3_lower, int ch3_upper
                       ) {

    int i, k;

    IplImage *Color_img;
    IplImage *ch1_img, *ch2_img, *ch3_img;
    IplImage *Mask_img;

    int lower[3];
    int upper[3];
    int val[3];

    CvMat *lut;

    //color conversion according to 'code'
    Color_img = cvCreateImage(cvGetSize(src_img), src_img->depth, src_img->nChannels);
    cvCvtColor(src_img, Color_img, code);

    //build a 3-channel LUT
    lut    = cvCreateMat(256, 1, CV_8UC3);

    lower[0] = ch1_lower;
    lower[1] = ch2_lower;
    lower[2] = ch3_lower;

    upper[0] = ch1_upper;
    upper[1] = ch2_upper;
    upper[2] = ch3_upper;

    for (i = 0; i < 256; i++) {
        for (k = 0; k < 3; k++) {
            if (lower[k] <= upper[k]) {
                if ((lower[k] <= i) && (i <= upper[k])) {
                    val[k] = 255;
                } else {
                    val[k] = 0;
                }
            } else {
                if ((i <= upper[k]) || (lower[k] <= i)) {
                    val[k] = 255;
                } else {
                    val[k] = 0;
                }
            }
        }
        //set the LUT entry
        cvSet1D(lut, i, cvScalar(val[0], val[1], val[2]));
    }

    //apply the LUT to all 3 channels (binarizes each channel)
    cvLUT(Color_img, Color_img, lut);
    cvReleaseMat(&lut);

    //allocate an IplImage for each channel
    ch1_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    ch2_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    ch3_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);

    //split the per-channel binarized image into its individual channels
    cvSplit(Color_img, ch1_img, ch2_img, ch3_img, NULL);

    //AND all 3 channels together to build the mask image
    Mask_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    cvAnd(ch1_img, ch2_img, Mask_img);
    cvAnd(Mask_img, ch3_img, Mask_img);

    //copy the masked region of the input image (src_img) into the output image (dst_img)
    cvZero(dst_img);
    cvCopy(src_img, dst_img, Mask_img);

    //release resources
    cvReleaseImage(&Color_img);
    cvReleaseImage(&ch1_img);
    cvReleaseImage(&ch2_img);
    cvReleaseImage(&ch3_img);
    cvReleaseImage(&Mask_img);

}
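
A hypothetical call illustrating the wrap-around case described in the notes above: extracting reddish pixels in HSV, where the 8-bit hue channel (0..180 in OpenCV) wraps at the red end. The file name and threshold values are assumptions, not taken from this source.

IplImage* src = cvLoadImage( "input.png", CV_LOAD_IMAGE_COLOR ); // hypothetical input
IplImage* dst = cvCreateImage( cvGetSize(src), IPL_DEPTH_8U, 3 );

// ch1_lower > ch1_upper, so the wrapped hue range [170..180] U [0..10] is selected
cv_ColorExtraction( src, dst, CV_BGR2HSV,
                    170, 10,    // hue
                    80, 255,    // saturation
                    80, 255 );  // value

cvNamedWindow( "extracted", 1 );
cvShowImage( "extracted", dst );
cvWaitKey(0);
cvReleaseImage( &src );
cvReleaseImage( &dst );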
Esempio n. 29
0
void CLightSet::RunLightPrep(IplImage* src,IplImage* dest)
{
	int M,N;
	M=0;
	N=0;
	if (src->roi)
	{
		 M = src->roi->width;
		 N = src->roi->height;
	}
	else
	{
		 M = src->width;
		 N = src->height;
	}

	CvMat *matD; // create mat for meshgrid frequency matrices
	matD = cvCreateMat(M,N,CV_32FC1);

	CDM(M,N,matD);

	CvMat *matH;
	matH = cvCreateMat(M,N,CV_32FC1); // mat for lowpass filter

	float D0 = 10.0;
	float rH,rL,c;
	rH = 2.0;
	rL = 0.5;
	c  = 1.0;
	lpfilter(matD,matH,D0,rH,rL,c);

	IplImage *srcshift; // shift center
	srcshift = cvCloneImage(src);
	cvShiftDFT(srcshift,srcshift);

	IplImage *log, *temp;
	log = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);
	temp = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);

	cvCvtScale(srcshift,temp,1.0,0);
	cvLog(temp,log);
	cvCvtScale(log,log,-1.0,0);

	CvMat *Fourier;
	Fourier = cvCreateMat( M, N, CV_32FC2 );

	fft2(log,Fourier);
	IplImage* image_im;
	image_im = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,1);

	cvSplit(Fourier,dest,image_im,0,0);

	cvMul(dest,matH,dest); 
	cvMul(image_im,matH,image_im);

	IplImage *dst;
	dst  = cvCreateImage(cvGetSize(src),IPL_DEPTH_32F,2);

	cvMerge(dest,image_im,0,0,dst);
	cvDFT(dst,dst,CV_DXT_INV_SCALE); 

	cvExp(dst,dst);

	cvZero(dest);
	cvZero(image_im);

	cvSplit(dst,dest,image_im,0,0); 
	//shift quadrants back so the image appears in its original order
	cvShiftDFT(dest,dest);

	double max,min; // normalize the filtered result to [0,1] using its actual range
	cvMinMaxLoc(dest,&min,&max,NULL,NULL);
	if (max > min)
		cvCvtScale(dest,dest,1.0/(max-min),-min/(max-min));

	cvReleaseImage(&image_im);
	cvReleaseImage(&srcshift);
 	cvReleaseImage(&dst);	
	cvReleaseImage(&log);
	cvReleaseImage(&temp);
	cvReleaseMat(&matD);
	cvReleaseMat(&matH);
}
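
lpfilter() itself is not shown in this example. For context, here is a plausible sketch of the Gaussian high-frequency-emphasis transfer function that homomorphic filtering conventionally uses with these parameters (D0, rH, rL, c); this is an assumption about the helper, not code from this source:

// hypothetical sketch: H(u,v) = (rH - rL) * (1 - exp(-c * D(u,v)^2 / D0^2)) + rL
static void lpfilter_sketch( const CvMat* matD, CvMat* matH,
                             float D0, float rH, float rL, float c )
{
    int r, col;
    for( r = 0; r < matD->rows; r++ )
    {
        for( col = 0; col < matD->cols; col++ )
        {
            double D = cvmGet( matD, r, col ); // distance from the frequency-plane center
            double H = (rH - rL) * (1.0 - exp( -c * (D*D) / (double)(D0*D0) )) + rL;
            cvmSet( matH, r, col, H );
        }
    }
}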
Esempio n. 30
0
int CV_QueryHistTest::prepare_test_case( int test_case_idx )
{
    int code = CV_BaseHistTest::prepare_test_case( test_case_idx );

    if( code > 0 )
    {
        int i, j, iters;
        float default_value = 0.f;
        CvRNG* rng = ts->get_rng();
        CvMat* bit_mask = 0;
        int* idx;

        iters = (cvTsRandInt(rng) % MAX(total_size/10,100)) + 1;
        iters = MIN( iters, total_size*9/10 + 1 );
        
        indices = cvCreateMat( 1, iters*cdims, CV_32S );
        values = cvCreateMat( 1, iters, CV_32F );
        values0 = cvCreateMat( 1, iters, CV_32F );
        idx = indices->data.i;

        //printf( "total_size = %d, cdims = %d, iters = %d\n", total_size, cdims, iters );

        bit_mask = cvCreateMat( 1, (total_size + 7)/8, CV_8U );
        cvZero( bit_mask );

        #define GET_BIT(n) (bit_mask->data.ptr[(n)/8] & (1 << ((n)&7)))
        #define SET_BIT(n) bit_mask->data.ptr[(n)/8] |= (1 << ((n)&7))

        // set random histogram bins' values to the linear indices of the bins
        for( i = 0; i < iters; i++ )
        {
            int lin_idx = 0;
            for( j = 0; j < cdims; j++ )
            {
                int t = cvTsRandInt(rng) % dims[j];
                idx[i*cdims + j] = t;
                lin_idx = lin_idx*dims[j] + t;
            }

            if( cvTsRandInt(rng) % 8 || GET_BIT(lin_idx) )
            {
                values0->data.fl[i] = (float)(lin_idx+1);
                SET_BIT(lin_idx);
            }
            else
                // some histogram bins will not be initialized intentionally,
                // they should be equal to the default value
                values0->data.fl[i] = default_value;
        }

        // do the second pass to make values0 consistent with bit_mask
        for( i = 0; i < iters; i++ )
        {
            int lin_idx = 0;
            for( j = 0; j < cdims; j++ )
                lin_idx = lin_idx*dims[j] + idx[i*cdims + j];

            if( GET_BIT(lin_idx) )
                values0->data.fl[i] = (float)(lin_idx+1);
        }
    
        cvReleaseMat( &bit_mask );
    }

    return code;
}
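
The later stages of this test (not shown in this excerpt) presumably read the selected bins back from the histogram and compare them against values0. A hedged sketch of such a read-back, where hist is assumed to be the CvHistogram under test:

// hypothetical read-back: untouched bins should return the default value (0)
for( int i = 0; i < values->cols; i++ )
{
    int idx0[CV_MAX_DIM];
    for( int j = 0; j < cdims; j++ )
        idx0[j] = indices->data.i[i*cdims + j];
    values->data.fl[i] = (float)cvGetRealND( hist->bins, idx0 );
}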