CV_IMPL void
cvCalcImageHomography( float* line, CvPoint3D32f* _center,
                       float* _intrinsic, float* _homography )
{
    double norm_xy, norm_xz, xy_sina, xy_cosa, xz_sina, xz_cosa, nx1, plane_dist;
    float _ry[3], _rz[3], _r_trans[9];
    CvMat rx = cvMat( 1, 3, CV_32F, line );
    CvMat ry = cvMat( 1, 3, CV_32F, _ry );
    CvMat rz = cvMat( 1, 3, CV_32F, _rz );
    CvMat r_trans = cvMat( 3, 3, CV_32F, _r_trans );
    CvMat center = cvMat( 3, 1, CV_32F, _center );

    float _sub[9];
    CvMat sub = cvMat( 3, 3, CV_32F, _sub );
    float _t_trans[3];
    CvMat t_trans = cvMat( 3, 1, CV_32F, _t_trans );

    CvMat intrinsic = cvMat( 3, 3, CV_32F, _intrinsic );
    CvMat homography = cvMat( 3, 3, CV_32F, _homography );

    if( !line || !_center || !_intrinsic || !_homography )
        CV_Error( CV_StsNullPtr, "" );

    norm_xy = cvSqrt( line[0] * line[0] + line[1] * line[1] );
    xy_cosa = line[0] / norm_xy;
    xy_sina = line[1] / norm_xy;

    norm_xz = cvSqrt( line[0] * line[0] + line[2] * line[2] );
    xz_cosa = line[0] / norm_xz;
    xz_sina = line[2] / norm_xz;

    nx1 = -xz_sina;

    _rz[0] = (float)(xy_cosa * nx1);
    _rz[1] = (float)(xy_sina * nx1);
    _rz[2] = (float)xz_cosa;
    cvScale( &rz, &rz, 1./cvNorm(&rz,0,CV_L2) );

    /*  new axis y  */
    cvCrossProduct( &rz, &rx, &ry );
    cvScale( &ry, &ry, 1./cvNorm( &ry, 0, CV_L2 ) );

    /*  transpose rotation matrix  */
    memcpy( &_r_trans[0], line, 3*sizeof(float));
    memcpy( &_r_trans[3], _ry, 3*sizeof(float));
    memcpy( &_r_trans[6], _rz, 3*sizeof(float));

    /*  calculate center distance from arm plane  */
    plane_dist = cvDotProduct( &center, &rz );

    /* calculate (I - r_trans)*center */
    cvSetIdentity( &sub );
    cvSub( &sub, &r_trans, &sub );
    cvMatMul( &sub, &center, &t_trans ); 

    cvMatMul( &t_trans, &rz, &sub );
    cvScaleAdd( &sub, cvRealScalar(1./plane_dist), &r_trans, &sub ); /* ? */

    cvMatMul( &intrinsic, &sub, &r_trans );
    cvInvert( &intrinsic, &sub, CV_SVD );
    cvMatMul( &r_trans, &sub, &homography );
}
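A minimal usage sketch (not from the original source): the arm-line direction, hand center, and camera intrinsics below are made-up values, chosen only to show how cvCalcImageHomography is typically called; the header that declares it differs across OpenCV 1.x/2.x releases.

/* usage sketch: hypothetical values, not taken from the function above */
void demo_calc_image_homography( void )
{
    float line[3] = { 1.f, 0.f, 0.f };                    /* arm direction (unit vector) */
    CvPoint3D32f center = cvPoint3D32f( 0.f, 0.f, 1.f );  /* hand center in camera coordinates */
    float intrinsic[9] = { 500.f,   0.f, 320.f,
                             0.f, 500.f, 240.f,
                             0.f,   0.f,   1.f };         /* 3x3 camera matrix, row-major */
    float homography[9];                                  /* receives the 3x3 result */

    cvCalcImageHomography( line, &center, intrinsic, homography );
}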
Example #2
CV_IMPL CvKalman*
cvCreateKalman( int DP, int MP, int CP )
{
    CvKalman *kalman = 0;

    if( DP <= 0 || MP <= 0 )
        CV_Error( CV_StsOutOfRange,
        "state and measurement vectors must have positive number of dimensions" );

    if( CP < 0 )
        CP = DP;

    /* allocating memory for the structure */
    kalman = (CvKalman *)cvAlloc( sizeof( CvKalman ));
    memset( kalman, 0, sizeof(*kalman));

    kalman->DP = DP;
    kalman->MP = MP;
    kalman->CP = CP;

    kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 );
    cvZero( kalman->state_pre );

    kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 );
    cvZero( kalman->state_post );

    kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 );
    cvSetIdentity( kalman->transition_matrix );

    kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 );
    cvSetIdentity( kalman->process_noise_cov );

    kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 );
    cvZero( kalman->measurement_matrix );

    kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 );
    cvSetIdentity( kalman->measurement_noise_cov );

    kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 );

    kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 );
    cvZero( kalman->error_cov_post );

    kalman->gain = cvCreateMat( DP, MP, CV_32FC1 );

    if( CP > 0 )
    {
        kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 );
        cvZero( kalman->control_matrix );
    }

    kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 );
    kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 );
    kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 );
    kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 );
    kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 );

#if 1
    kalman->PosterState = kalman->state_pre->data.fl;
    kalman->PriorState = kalman->state_post->data.fl;
    kalman->DynamMatr = kalman->transition_matrix->data.fl;
    kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
    kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
    kalman->PNCovariance = kalman->process_noise_cov->data.fl;
    kalman->KalmGainMatr = kalman->gain->data.fl;
    kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
    kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
#endif

    return kalman;
}
Example #3
int StereoVision::calibrationEnd() {
    calibrationStarted = false;

    // ARRAY AND VECTOR STORAGE:
    double M1[3][3], M2[3][3], D1[5], D2[5];
    double R[3][3], T[3], E[3][3], F[3][3];
    CvMat _M1,_M2,_D1,_D2,_R,_T,_E,_F;

    _M1 = cvMat(3, 3, CV_64F, M1 );
    _M2 = cvMat(3, 3, CV_64F, M2 );
    _D1 = cvMat(1, 5, CV_64F, D1 );
    _D2 = cvMat(1, 5, CV_64F, D2 );
    _R = cvMat(3, 3, CV_64F, R );
    _T = cvMat(3, 1, CV_64F, T );
    _E = cvMat(3, 3, CV_64F, E );
    _F = cvMat(3, 3, CV_64F, F );

    // HARVEST CHESSBOARD 3D OBJECT POINT LIST:
    objectPoints.resize(sampleCount*cornersN);

    for(int k=0; k<sampleCount; k++)
        for(int i = 0; i < cornersY; i++ )
            for(int j = 0; j < cornersX; j++ )
                objectPoints[k*cornersY*cornersX + i*cornersX + j] = cvPoint3D32f(i, j, 0);


    npoints.resize(sampleCount,cornersN);

    int N = sampleCount * cornersN;


    CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
    CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
    CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
    CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
    cvSetIdentity(&_M1);
    cvSetIdentity(&_M2);
    cvZero(&_D1);
    cvZero(&_D2);

    //CALIBRATE THE STEREO CAMERAS
    cvStereoCalibrate( &_objectPoints, &_imagePoints1,
                       &_imagePoints2, &_npoints,
                       &_M1, &_D1, &_M2, &_D2,
                       imageSize, &_R, &_T, &_E, &_F,
                       cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                       CV_CALIB_FIX_ASPECT_RATIO + CV_CALIB_ZERO_TANGENT_DIST + CV_CALIB_SAME_FOCAL_LENGTH
                     );

    //Always work in undistorted space
    cvUndistortPoints( &_imagePoints1, &_imagePoints1,&_M1, &_D1, 0, &_M1 );
    cvUndistortPoints( &_imagePoints2, &_imagePoints2,&_M2, &_D2, 0, &_M2 );

    //COMPUTE AND DISPLAY RECTIFICATION


    double R1[3][3], R2[3][3];
    CvMat _R1 = cvMat(3, 3, CV_64F, R1);
    CvMat _R2 = cvMat(3, 3, CV_64F, R2);

    //HARTLEY'S RECTIFICATION METHOD
    double H1[3][3], H2[3][3], iM[3][3];
    CvMat _H1 = cvMat(3, 3, CV_64F, H1);
    CvMat _H2 = cvMat(3, 3, CV_64F, H2);
    CvMat _iM = cvMat(3, 3, CV_64F, iM);

    cvStereoRectifyUncalibrated(
        &_imagePoints1,&_imagePoints2, &_F,
        imageSize,
        &_H1, &_H2, 3
    );
    cvInvert(&_M1, &_iM);
    cvMatMul(&_H1, &_M1, &_R1);
    cvMatMul(&_iM, &_R1, &_R1);
    cvInvert(&_M2, &_iM);
    cvMatMul(&_H2, &_M2, &_R2);
    cvMatMul(&_iM, &_R2, &_R2);


    //Precompute map for cvRemap()
    cvReleaseMat(&mx1);
    cvReleaseMat(&my1);
    cvReleaseMat(&mx2);
    cvReleaseMat(&my2);
    mx1 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    my1 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    mx2 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    my2 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );

    cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
    cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_M2,mx2,my2);

    calibrationDone = true;

    return RESULT_OK;
}
Example #4
void cv_SetIdentity (CvArr* mat, CvScalar* scalar) {
  cvSetIdentity(mat, *scalar);
}
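A hypothetical caller (not part of the original binding), showing how the wrapper above might be used from C; it simply forwards to cvSetIdentity with the dereferenced scalar.

/* hypothetical caller of the wrapper above */
void demo_cv_SetIdentity (void) {
  CvMat* m = cvCreateMat(3, 3, CV_32FC1);
  CvScalar one = cvRealScalar(1.0);
  cv_SetIdentity(m, &one);   /* equivalent to cvSetIdentity(m, cvRealScalar(1.0)) */
  cvReleaseMat(&m);
}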
Example #5
int main(int argc, char **argv)
{

	// Initialize, create Kalman Filter object, window, random number
	// generator etc.
	//
	cvNamedWindow("Kalman", 1);
	CvRandState rng;
	cvRandInit(&rng, 0, 1, -1, CV_RAND_UNI);

	IplImage *img = cvCreateImage(cvSize(500, 500), 8, 3);
	CvKalman *kalman = cvCreateKalman(2, 1, 0);
	// state is (phi, delta_phi) - angle and angular velocity
	// Initialize with random guess.
	//
	CvMat *x_k = cvCreateMat(2, 1, CV_32FC1);
	cvRandSetRange(&rng, 0, 0.1, 0);
	rng.disttype = CV_RAND_NORMAL;
	cvRand(&rng, x_k);

	// process noise
	//
	CvMat *w_k = cvCreateMat(2, 1, CV_32FC1);

	// measurements, only one parameter for angle
	//
	CvMat *z_k = cvCreateMat(1, 1, CV_32FC1);
	cvZero(z_k);

	// Transition matrix 'F' describes relationship between
	// model parameters at step k and at step k+1 (this is
	// the "dynamics" in our model).
	//
	const float F[] = { 1, 1, 0, 1 };
	memcpy(kalman->transition_matrix->data.fl, F, sizeof(F));
	// Initialize other Kalman filter parameters.
	//
	cvSetIdentity(kalman->measurement_matrix, cvRealScalar(1));
	cvSetIdentity(kalman->process_noise_cov, cvRealScalar(1e-5));
	cvSetIdentity(kalman->measurement_noise_cov, cvRealScalar(1e-1));
	cvSetIdentity(kalman->error_cov_post, cvRealScalar(1));

	// choose random initial state
	//
	cvRand(&rng, kalman->state_post);

	while (1) {
		// predict point position
		const CvMat *y_k = cvKalmanPredict(kalman, 0);

		// generate measurement (z_k)
		//
		cvRandSetRange(&rng,
					   0, sqrt(kalman->measurement_noise_cov->data.fl[0]), 0);
		cvRand(&rng, z_k);
		cvMatMulAdd(kalman->measurement_matrix, x_k, z_k, z_k);
		// plot points (e.g., convert to planar coordinates and draw)
		//
		cvZero(img);
		cvCircle(img, phi2xy(z_k), 4, CVX_YELLOW);	// observed state
		cvCircle(img, phi2xy(y_k), 4, CVX_WHITE, 2);	// "predicted" state
		cvCircle(img, phi2xy(x_k), 4, CVX_RED);	// real state
		cvShowImage("Kalman", img);
		// adjust Kalman filter state
		//
		cvKalmanCorrect(kalman, z_k);

		// Apply the transition matrix 'F' (e.g., step time forward)
		// and also apply the "process" noise w_k.
		//
		cvRandSetRange(&rng, 0, sqrt(kalman->process_noise_cov->data.fl[0]), 0);
		cvRand(&rng, w_k);
		cvMatMulAdd(kalman->transition_matrix, x_k, w_k, x_k);

		// exit if user hits 'Esc'
		if (cvWaitKey(100) == 27)
			break;
	}

	return 0;
}
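The listing above relies on two helpers that are not shown: the CVX_* color macros and phi2xy(), which maps the angle stored in the first element of a float matrix onto the 500x500 canvas. A plausible reconstruction (assuming the drawing conventions of the other Kalman demos in this collection, and that math.h and the OpenCV headers are included):

/* assumed helpers, not part of the original listing */
#define CVX_RED    CV_RGB( 0xff, 0x00, 0x00 )
#define CVX_YELLOW CV_RGB( 0xff, 0xff, 0x00 )
#define CVX_WHITE  CV_RGB( 0xff, 0xff, 0xff )

/* map the angle in the first element of a float matrix to a point on a
   circle centered in the 500x500 window (radius chosen arbitrarily) */
CvPoint phi2xy( const CvMat* mat )
{
    return cvPoint(
        cvRound( 250 + 250 * cos( mat->data.fl[0] ) ),
        cvRound( 250 - 250 * sin( mat->data.fl[0] ) ) );
}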
Example #6
KalmanPoint *NewKalman(PointSeq *Point_New, int ID, int frame_count, int contourArea) {
	const float A[] = { 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1 };
	const float H[] = { 1, 0, 0, 0, 0, 1, 0, 0 };

	KalmanPoint *Kalmanfilter;
	Kalmanfilter = (KalmanPoint *) malloc(sizeof(KalmanPoint));
	CvKalman *Kalman = cvCreateKalman(4, 2, 0);

	float measure[2] =
			{ (float) Point_New->Point.x, (float) Point_New->Point.y };
	CvMat measurement = cvMat(2, 1, CV_32FC1, measure);

	// initialize the Kalman filter
	memcpy(Kalman->transition_matrix->data.fl, A, sizeof(A));
	memcpy(Kalman->measurement_matrix->data.fl, H, sizeof(H));
	cvSetIdentity(Kalman->error_cov_post, cvRealScalar(10));
	cvSetIdentity(Kalman->process_noise_cov, cvRealScalar(1e-5));
	cvSetIdentity(Kalman->measurement_noise_cov, cvRealScalar(1e-1));

	// for the first time, use the measured point as the state_post
	cvZero(Kalman->state_post);
	cvmSet(Kalman->state_post, 0, 0, Point_New->Point.x);
	cvmSet(Kalman->state_post, 1, 0, Point_New->Point.y);

	// use Kalman  filter to predict the position of the centroid
	const CvMat *prediction = cvKalmanPredict(Kalman, 0);
	// use the measurement to correct the position
	const CvMat *correction = cvKalmanCorrect(Kalman, &measurement);

	/*
	 // test code
	 printf("measurement %f,%f\n",cvmGet(&measurement,0,0),cvmGet(&measurement, 1, 0));
	 printf("prediction %f,%f\n",cvmGet(prediction, 0, 0),cvmGet(prediction, 1, 0));
	 printf("correction %f,%f\ndelta %f,%f\n",cvmGet(correction, 0, 0),cvmGet(correction, 1, 0),
	 cvmGet(correction, 2, 0), cvmGet(correction, 3, 0));
	 */
	Kalmanfilter->Point_now = Point_New->Point;
	Kalmanfilter->Point_pre = Point_New->Point;
	Kalmanfilter->firstPoint = Point_New->Point;
	Kalmanfilter->firstFrame = frame_count; // added 25 January 2016 for speed measurement
	Kalmanfilter->lastFrame = frame_count; // added 25 January 2016 for speed measurement
	Kalmanfilter->Kalman = Kalman;
	Kalmanfilter->ID = Point_New->ID = ID;
	Kalmanfilter->contourArea=contourArea;
	/*
	//value 1 = car, value 2 = truck, value 3 = undefined
	if (Kalmanfilter->contourArea >= 4800
			&& Kalmanfilter->contourArea <= 19900) {
		Kalmanfilter->jenis = 1;

	} else if (Kalmanfilter->contourArea > 19900) {
		Kalmanfilter->jenis = 2;

	} else {
		Kalmanfilter->jenis = 3;
	}
	*/
	//value 1 = motorcycle; value 2 = car; value 3 = medium truck; value 4 = large truck; value 0 = undefined;
	if (Kalmanfilter->contourArea >= 4000
			&& Kalmanfilter->contourArea <= 7000) {
		Kalmanfilter->jenis = 2;
	} else if (Kalmanfilter->contourArea > 7000
			&& Kalmanfilter->contourArea <= 12000) {
		Kalmanfilter->jenis = 3;
	} else if (Kalmanfilter->contourArea > 12000) {
		Kalmanfilter->jenis = 4;
	} else {
		Kalmanfilter->jenis = 0;
	}
	//Kalmanfilter->jenis= 0; // added 26 January 2016
	Kalmanfilter->Loss = 0;

	return Kalmanfilter;
}
Example #7
TargetObject::TargetObject(int idd, std::string namee, float xx, float yy, float zz, float vxx, float vyy, float vzz, int r, int g, int b, int inacTime):
	id(idd),
	name(namee),
	x(xx),
	y(yy),
	z (zz),
	vx(vxx),
	vz(vzz),
	vy(vyy),
	red(r),
	green(g),
	blue(b),
	//associated_to_cam_data(false),
	//associated_to_any_data(false),
	hue(NOF_HUEBINS/2.0),
	weight(-1),
	hueDataAvailable(false),
	currentHueValue(1.0),
	minMahaDist(999.),
	attentionalPriority(20. + ((float)rand()/(float)RAND_MAX)),
	sendx(xx),
	sendy(yy),
	sendz(zz),
	tapx(-0.5),
	tapy(0.0)
{

	predX = x;
	predY = y;

	weightReset = false;
	curWeight = 0.0;
	prevWeight = 0.0;
	speedVec.clear();
	speedVecx.clear();
	speedVecy.clear();
	meanSpeed = 0.0;
	meanSpeedx = 0.0;
	meanSpeedy = 0.0;
	computeSpecialID();

	//tempTimer.setActiveTime(); // this is testing timer..delete later
	//newHue = false;
	
	// set timer
	timer.setActiveTime();
	//std::cout << "targets previousActive time at creation = " << timer->previousActiveTime << std::endl;

	// initialize measurement matrix
	measurement = cvCreateMat( MES_SIZE, 1, CV_32FC1 );

	// initialize measurement_prediction matrix
	measurement_pred = cvCreateMat( MES_SIZE, 1, CV_32FC1 );

	// temporary matrices
	combined_innov = cvCreateMat( MES_SIZE, 1, CV_32FC1 );
	tempo1 = cvCreateMat( MES_SIZE, 1, CV_32FC1 );
	tempo2 = cvCreateMat( KALMAN_SIZE, 1, CV_32FC1 );
	tempoTrans1 = cvCreateMat( 1, MES_SIZE, CV_32FC1 );
	tempCov1 = cvCreateMat(MES_SIZE,KALMAN_SIZE,CV_32FC1);		
	tempCov2 = cvCreateMat(KALMAN_SIZE,MES_SIZE,CV_32FC1);		
	tempCov3 = cvCreateMat(MES_SIZE,MES_SIZE,CV_32FC1);
	tempCov4 = cvCreateMat(KALMAN_SIZE,KALMAN_SIZE,CV_32FC1);
	tempCov5 = cvCreateMat(KALMAN_SIZE,KALMAN_SIZE,CV_32FC1);
	tempCov6 = cvCreateMat(MES_SIZE,MES_SIZE,CV_32FC1);
	combInnovCov = cvCreateMat(MES_SIZE,MES_SIZE,CV_32FC1);

	/// extra error
	extra_error = cvCreateMat(KALMAN_SIZE,KALMAN_SIZE,CV_32FC1);

	// JPDA event probabilities
	event_probs = new std::vector<float>;

	all_innovs = new std::vector<CvMat*>; 


	all_measurements = new std::vector<high_dim_data>;
	// CvKalman* cvCreateKalman( int dynam_params, int measure_params, int control_params=0 );
	kalman  = cvCreateKalman( KALMAN_SIZE, MES_SIZE, CONTROL_SIZE );    // state is x,y,z,vx,vy,vz,ax,ay,az,hue,weight and control vector is ax,ay (=acceleration)
	
	// measurement is x, y, z, hue, weight

	//std::cout << "kalman state size = " << kalman->DP << std::endl; 
	//std::cout << "kalman mes size = " << kalman->MP << std::endl;

	// set the current state
	CvMat* M = kalman->state_post;
	int   step  = M->step/sizeof(float);
	float *data = M->data.fl;

	(data+0*step)[0] = xx;
	(data+1*step)[0] = yy;
	(data+2*step)[0] = zz;
	(data+3*step)[0] = vxx;
	(data+4*step)[0] = vyy;
	(data+5*step)[0] = vzz;
	(data+6*step)[0] = 0.0; // acceleration x
	(data+7*step)[0] = 0.0; // acceleration y
	(data+8*step)[0] = 0.0; // acceleration z
	(data+9*step)[0] = hue; // hue
	(data+10*step)[0] = weight; // weight




	/// set the prediction to the same as x, y (for validation gate)
	cvmSet(kalman->state_pre,0,0, 300.);
	cvmSet(kalman->state_pre,1,0, 300.);
	notAssociated = false;
	lostTrack = false;

	//cvSetIdentity( extra_error, cvRealScalar(60) ); /// 3500 until april 2008

	// initialize the Kalman vectors and matrices
	//const float A[] = { 1.0, 0.0, 0.0, 0.0, 1.0 ,0.0,0.0,0.0,1.0 }; // transition matrix
// 	const float A[] = { 	1.0, 0.0, 1.0, 0.0, 1.0, 0.0 ,
// 			    	0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
// 				0.0, 0.0, 1.0, 0.0, 1.0, 0.0,
// 				0.0, 0.0, 0.0, 1.0, 0.0, 1.0,
// 				0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
// 				0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }; // transition matrix

	//                      X,   Y,   Z,   VX,  VY,  VZ,  AX,  AY,  AZ,  HUE  WEI

	const float A[] = { 	1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
			    	0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,  0.0, 0.0, 1.0,
				}; // transition matrix 

	
	memcpy( kalman->transition_matrix->data.fl, A, sizeof(A));
	//printMatrix(kalman->transition_matrix, "transition matrix");
	///std::cout << "ok 1" << std::endl;
	// H = measurement matrix 
	//const float MES[] = { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 ,
	//		      0.0, 1.0, 0.0, 0.0, 0.0, 0.0 }; //measurement matrix

	const float MES[] = { 
	1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
	0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
	0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
	0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
	0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0
 				}; //


	memcpy( kalman->measurement_matrix->data.fl, MES, sizeof(MES));
        //printMatrix(kalman->measurement_matrix, "measurement matrix");

	

	/// process noise covariance matrix

	const float B[] = {     0.01, 0.0, 0.0, 0.001, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0,
			    	0.0, 0.01, 0.0, 0.0, 0.001, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.001, 0.0, 0.0, 0.001, 0.0, 0.0, 0.001, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.001, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,  0.0, 0.0, 0.01	 
				}; 

	///memcpy( kalman->process_noise_cov->data.fl, B, sizeof(B));
  	
	cvSetIdentity( kalman->process_noise_cov, cvRealScalar(1.0) );
        cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(0.01) );

	/// measurement noise covariance
	///cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(1.0));
       
	
	/// estimation error covariance (P tilde)
	cvSetIdentity( kalman->error_cov_post, cvRealScalar(1.0)); 
	estimationAdvanced = false;

	/// testing
	//std::cout << "target created: id, x,y,hue,weight = " << id << ", " << x << ", " << y << ", " << hue << ", " << weight << std::endl;

	/// for computation of the validation gate
	xaxis = 100.;
	yaxis = 100.;
	growth_factor = 0.1;

	
}
Example #8
// do the Kalman estimation, given the measurement (x, y, z, hue, weight)
void TargetObject::kalmanEstimateState(float xx, float yy, float zz, float huee, float weightt){

	/// attention: make sure that the covariance min and max are kept (otherwise we run into numerical instability problems)

	//std::cout << " --------------kalmanEstimateState with " << xx << ", " << yy << ", " << huee << ", " << weightt << std::endl;
	/// set timer 
	//timer.setActiveTime();	
	
	// save old state
	xOld = x;
	yOld = y;
	zOld = z;
	vxOld = vx;
	vyOld = vy;
	vzOld = vz;
	hueOld = hue;
	weightOld = weight;


	// pointer to the state data
	CvMat* M = kalman->state_post;
	int   step  = M->step/sizeof(float);
	float *data = M->data.fl;


	// measurement 
	 /* x,y is measured */
	// pointer to the measurement data
	float* mesdata = measurement->data.fl;
	int   stepm  = measurement->step/sizeof(float);
	(mesdata+0*stepm)[0] = xx;
	(mesdata+1*stepm)[0] = yy;
	(mesdata+2*stepm)[0] = zz;
	(mesdata+3*stepm)[0] = huee;
	(mesdata+4*stepm)[0] = weightt;

	cvKalmanCorrect( kalman, measurement );
	
	float* postdata = kalman->state_post->data.fl;
	x = (postdata+0*step)[0];
	y = (postdata+1*step)[0];
	z = (postdata+2*step)[0];
	vx = (postdata+3*step)[0];
	vy = (postdata+4*step)[0];
	vz = (postdata+5*step)[0];
	hue = (postdata+9*step)[0];
	weight = (postdata+10*step)[0];


	if (isnan(x)){
		x = xOld;
		std::cout << "kalmanUpdateState: x is nan" << std::endl;
	}
	if (isnan(y)){
		y = yOld;
		std::cout << "kalmanUpdateState: y is nan" << std::endl;
	}
	if (isnan(z)){
		z = zOld;
		std::cout << "kalmanUpdateState: z is nan" << std::endl;
	}
	if (isnan(vx)){
		vx = vxOld;
		std::cout << "kalmanUpdateState: vx is nan" << std::endl;
	}
	if (isnan(vy)){
		vy = vyOld;
		std::cout << "kalmanUpdateState: vy is nan" << std::endl;
	}
	if (isnan(vz)){
		vz = vzOld;
		std::cout << "kalmanUpdateState: vz is nan" << std::endl;
	}
	
	if (isnan(hue)){
		hue = hueOld;
		std::cout << "kalmanUpdateState: hue is nan" << std::endl;
	}
	if (isnan(weight)){
		std::cout << "kalmanUpdateState: weight is nan" << std::endl;
		weight = weightOld;
	}

	//std::cout << "id: " << id << ", " << x << ", " << y <<  ", " << hue << ", " << weight << std::endl; 
	//std::cout << "id = " << id << ", norm cov = " << cvNorm(kalman->error_cov_post) << std::endl;
	double curNorm = cvNorm(kalman->error_cov_post);
	if (curNorm > 17.0){
		estimationAdvanced = true;
	}


	// 22.0 and 17.0 before
	if ((curNorm > 22.0 ) || ((estimationAdvanced) && (curNorm < 18.0)))
	{

		estimationAdvanced = false;
		//std::cout << std::endl << std::endl << "************************************ resetting  " << std::endl << std::endl; 

		/// set the new x and y to the data
		// set the current state
		CvMat* M = kalman->state_post;
		int   step  = M->step/sizeof(float);
		float *data = M->data.fl;

		(data+0*step)[0] = xx;
		(data+1*step)[0] = yy;
		(data+2*step)[0] = zz;
		(data+3*step)[0] = zz;
		(data+4*step)[0] = 1.0;
		(data+5*step)[0] = 1.0;
		(data+6*step)[0] = 1.0;
		(data+7*step)[0] = 1.0; 
		(data+8*step)[0] = 1.0;
		(data+9*step)[0] = huee; 
		(data+10*step)[0] = weightt; 


		cvSetIdentity( kalman->error_cov_post, cvRealScalar(1.0));

		cvCopy(kalman->state_post,kalman->state_pre );

		cvSetIdentity( kalman->process_noise_cov, cvRealScalar(5) );
        	cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(5.0) );
		

		/// for computation of the validation gate
		xaxis = 100.;
		yaxis = 100.;
		growth_factor = 0.1;

			
	}



	/// testing ID from hues
// 	if ((hue < 12.0) && (newHue) && (id ==0)){
// 		std::cout << "recovery green = " << tempTimer.getInactiveTime() << std::endl;
// 		 tempTimer.setActiveTime();
// 		newHue = false;
// 	}
// 	if ((hue > 15.0) && (newHue) && (id ==1)){
// 		std::cout << "recovery red = " << tempTimer.getInactiveTime() << std::endl;
// 		 tempTimer.setActiveTime();
// 		newHue = false;
// 	}

}
Example #9
int main(int argc, char** argv)
{
    
    CvMat* M = cvCreateMat(3, 3, CV_64FC1);

    cvSetIdentity(M);

    CvMat* L = MT_CreateCholeskyResult(M);

    MT_Cholesky(M, L);

    disp_mat(M, "M");
    disp_mat(L, "L = chol(M)");

    cvmSet(M, 1, 1, 4.0);
    cvmSet(M, 2, 2, 9.0);

    MT_Cholesky(M, L);

    disp_mat(M, "M");
    disp_mat(L, "L = chol(M)");

    cvmSet(M, 0, 1, 0.1);
    cvmSet(M, 0, 2, 0.1);
    cvmSet(M, 1, 0, 0.1);
    cvmSet(M, 1, 2, 0.1);
    cvmSet(M, 2, 0, 0.1);
    cvmSet(M, 2, 1, 0.1);

    MT_Cholesky(M, L);

    disp_mat(M, "M");
    disp_mat(L, "L = chol(M)");

    cvReleaseMat(&M);
    cvReleaseMat(&L);

    M = cvCreateMat(5, 5, CV_64FC1);
    cvSet(M, cvScalar(0.1));

    for(unsigned int i = 0; i < 5; i++)
    {
        cvmSet(M, i, i, (double) (i+1));
    }

    L = MT_CreateCholeskyResult(M);

    MT_Cholesky(M, L);

    disp_mat(M, "M");
    disp_mat(L, "L = chol(M)");

    cvReleaseMat(&M);
    cvReleaseMat(&L);


    CvMat* C = cvCreateMat(4, 4, CV_64FC1);
    CvMat* A = cvCreateMatHeader(2, 2, CV_64FC1);
    CvMat* B = cvCreateMatHeader(2, 2, CV_64FC1);

    cvGetSubRect(C, A, cvRect(0, 0, 2, 2));
    cvGetSubRect(C, B, cvRect(2, 2, 2, 2));

    cvSet(C, cvScalar(0));
    cvSet(A, cvScalar(1));
    cvSet(B, cvScalar(2));

    cvReleaseMat(&A);
    cvReleaseMat(&B);

    disp_mat(C, "C (composited)");

    cvReleaseMat(&C);    
        
}
Example #10
int main(int argc, char** argv)
{
    const float A[] = { 1, 1, 0, 1 }; // state transition matrix
    
    IplImage* img = cvCreateImage( cvSize(500,500), 8, 3 ); // image used for display
    CvKalman* kalman = cvCreateKalman( 2, 1, 0 ); // create the CvKalman structure: 2-D state vector, 1-D measurement vector, no control input
    CvMat* state = cvCreateMat( 2, 1, CV_32FC1 ); // (phi, delta_phi) defines the state variables
    CvMat* process_noise = cvCreateMat( 2, 1, CV_32FC1 ); // 2x1 single-channel float (CV_32FC1) matrix
    CvMat* measurement = cvCreateMat( 1, 1, CV_32FC1 ); // measurement variable
    CvRNG rng = cvRNG(-1); // initialize a random number generator
    char code = -1;

    cvZero( measurement ); // zero the measurement matrix
    cvNamedWindow( "Kalman", 1 );

    for(;;)
    {   // fill the output array 'state' with uniformly or normally distributed random numbers
        cvRandArr( &rng, state, CV_RAND_NORMAL, cvRealScalar(0), cvRealScalar(0.1) ); // state
        memcpy( kalman->transition_matrix->data.fl, A, sizeof(A)); // initialize the state transition matrix F
        
        //cvSetIdentity(): sets every element to 0 except the diagonal elements, which are set to 1
        //we initialize the posterior state (of the first, imaginary prior step) to a random value
        cvSetIdentity( kalman->measurement_matrix, cvRealScalar(1) ); // measurement matrix H
        cvSetIdentity( kalman->process_noise_cov, cvRealScalar(1e-5) ); // process noise Q
        cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(1e-1) ); // measurement noise R
        cvSetIdentity( kalman->error_cov_post, cvRealScalar(1)); // posterior error covariance
        cvRandArr( &rng, kalman->state_post, CV_RAND_NORMAL, cvRealScalar(0), cvRealScalar(0.1) ); // corrected state
        
        // start predicting on the dynamic system
        
        for(;;)
        {
            #define calc_point(angle)                                      \
                cvPoint( cvRound(img->width/2 + img->width/3*cos(angle)),  \
                         cvRound(img->height/2 - img->width/3*sin(angle))) 

            float state_angle = state->data.fl[0];
            CvPoint state_pt = calc_point(state_angle);
            
            const CvMat* prediction = cvKalmanPredict( kalman, 0 ); // predicted value for the next time step; control input is 0
            float predict_angle = prediction->data.fl[0];
            CvPoint predict_pt = calc_point(predict_angle);
            
            float measurement_angle;
            CvPoint measurement_pt;

            cvRandArr( &rng, measurement, CV_RAND_NORMAL, cvRealScalar(0),
                       cvRealScalar(sqrt(kalman->measurement_noise_cov->data.fl[0])) );

            /* generate measurement */
            cvMatMulAdd( kalman->measurement_matrix, state, measurement, measurement );
            //cvMatMulAdd(src1,src2,src3,dst) computes dst = src1*src2 + src3

            measurement_angle = measurement->data.fl[0];
            measurement_pt = calc_point(measurement_angle);
            
            //feed the Kalman filter the latest measurement; then generate process noise, multiply the state by the transition matrix F to advance one step, and add the generated process noise
            /* plot points */
            #define draw_cross( center, color, d )                                 \
                cvLine( img, cvPoint( center.x - d, center.y - d ),                \
                             cvPoint( center.x + d, center.y + d ), color, 1, CV_AA, 0); \
                cvLine( img, cvPoint( center.x + d, center.y - d ),                \
                             cvPoint( center.x - d, center.y + d ), color, 1, CV_AA, 0 )

            cvZero( img );
            //use the macro defined above
            draw_cross( state_pt, CV_RGB(255,255,255), 3 ); // white: state point
            draw_cross( measurement_pt, CV_RGB(255,0,0), 3 ); // red: measurement point
            draw_cross( predict_pt, CV_RGB(0,255,0), 3 ); // green: predicted point
            cvLine( img, state_pt, measurement_pt, CV_RGB(255,0,0), 3, CV_AA, 0 );
            cvLine( img, state_pt, predict_pt, CV_RGB(255,255,0), 3, CV_AA, 0 );
            
            cvKalmanCorrect( kalman, measurement ); // correct with the new measurement

            cvRandArr( &rng, process_noise, CV_RAND_NORMAL, cvRealScalar(0),
                       cvRealScalar(sqrt(kalman->process_noise_cov->data.fl[0]))); // normally distributed process noise
            cvMatMulAdd( kalman->transition_matrix, state, process_noise, state );

            cvShowImage( "Kalman", img );
            //when a key is pressed a new run starts; the initial matrix may change, so the motion rate may change
            code = (char) cvWaitKey( 100 );
            if( code > 0 )
                break;
        }
        if( code == 27 || code == 'q' || code == 'Q' )
            break;
    }
    
    cvDestroyWindow("Kalman");

    return 0;
}
Example #11
int main(int argc, char** argv)
{
    /* A matrix data */
    const float A[] = { 1, 1, 0, 1 };

    IplImage* img = cvCreateImage( cvSize(500,500), 8, 3 );
    CvKalman* kalman = cvCreateKalman( 2, 1, 0 );
    /* state is (phi, delta_phi) - angle and angle increment */
    CvMat* state = cvCreateMat( 2, 1, CV_32FC1 );
    CvMat* process_noise = cvCreateMat( 2, 1, CV_32FC1 );
    /* only phi (angle) is measured */
    CvMat* measurement = cvCreateMat( 1, 1, CV_32FC1 );
    CvRandState rng;
    int code = -1;

    cvRandInit( &rng, 0, 1, -1, CV_RAND_UNI );

    cvZero( measurement );
    cvNamedWindow( "Kalman", 1 );

    for(;;)
    {
        cvRandSetRange( &rng, 0, 0.1, 0 );
        rng.disttype = CV_RAND_NORMAL;

        cvRand( &rng, state );

        memcpy( kalman->transition_matrix->data.fl, A, sizeof(A));
        cvSetIdentity( kalman->measurement_matrix, cvRealScalar(1) );
        cvSetIdentity( kalman->process_noise_cov, cvRealScalar(1e-5) );
        cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(1e-1) );
        cvSetIdentity( kalman->error_cov_post, cvRealScalar(1));
        /* choose random initial state */
        cvRand( &rng, kalman->state_post );

        rng.disttype = CV_RAND_NORMAL;

        for(;;)
        {
            #define calc_point(angle)                                      \
                cvPoint( cvRound(img->width/2 + img->width/3*cos(angle)),  \
                         cvRound(img->height/2 - img->width/3*sin(angle)))

            float state_angle = state->data.fl[0];
            CvPoint state_pt = calc_point(state_angle);

            /* predict point position */
            const CvMat* prediction = cvKalmanPredict( kalman, 0 );
            float predict_angle = prediction->data.fl[0];
            CvPoint predict_pt = calc_point(predict_angle);
            float measurement_angle;
            CvPoint measurement_pt;

            cvRandSetRange( &rng,
                            0,
                            sqrt(kalman->measurement_noise_cov->data.fl[0]),
                            0 );
            cvRand( &rng, measurement );

            /* generate measurement */
            cvMatMulAdd( kalman->measurement_matrix, state, measurement, measurement );

            measurement_angle = measurement->data.fl[0];
            measurement_pt = calc_point(measurement_angle);

            /* plot points */
            #define draw_cross( center, color, d )                        \
                cvLine( img, cvPoint( center.x - d, center.y - d ),       \
                             cvPoint( center.x + d, center.y + d ),       \
                             color, 1, 0 );                               \
                cvLine( img, cvPoint( center.x + d, center.y - d ),       \
                             cvPoint( center.x - d, center.y + d ),       \
                             color, 1, 0 )

            cvZero( img );
            draw_cross( state_pt, CV_RGB(255,255,255), 3 );
            draw_cross( measurement_pt, CV_RGB(255,0,0), 3 );
            draw_cross( predict_pt, CV_RGB(0,255,0), 3 );
            cvLine( img, state_pt, predict_pt, CV_RGB(255,255,0), 3, 0 );

            /* adjust Kalman filter state */
            cvKalmanCorrect( kalman, measurement );

            cvRandSetRange( &rng,
                            0,
                            sqrt(kalman->process_noise_cov->data.fl[0]),
                            0 );
            cvRand( &rng, process_noise );
            cvMatMulAdd( kalman->transition_matrix,
                         state,
                         process_noise,
                         state );

            cvShowImage( "Kalman", img );
            code = cvWaitKey( 100 );

            if( code > 0 ) /* break current simulation by pressing a key */
                break;
        }
        if( code == 27 ) /* exit by ESCAPE */
            break;
    }

    return 0;
}
Example #12
int main(int argc, char **argv)
{
  std::string tracker_name;
  std::string obj_name;
  std::string input;
  std::string intrinsic;
  std::string distortion;
  int width;
  int height;
  float sample_step;
  bool dull_edge;
  float th_cm;
  int n; // number of particles
  bool display;
  bool net;
  bool save_rslt_txt;
  bool save_rslt_img;
  std::string str_result_path;
  CvMat* pose_init;
  pose_init = cvCreateMat(4, 4, CV_32F);
  cvSetIdentity(pose_init);
  CV_MAT_ELEM(*pose_init, float, 2, 3) = 0.5f; // 0.5 meter in front of camera
  std::string pose_init_str;
  int min_keypoint_matches;
  std::string ach_channel;
  bool use_tracking;

  po::options_description desc("Tracker options");
  desc.add_options()
    ("help,h", "produce help message")
    ("tracker,t", po::value<std::string>(&tracker_name)->default_value("irls"), "name tracker (irls, pf, pf_textureless)")
    ("obj-name,o", po::value<std::string>(&obj_name), "name of traget object")
    ("input,i", po::value<std::string>(&input), "name of camera (e.g. flea) or image sequence path (e.g. dir/seq1/)")
    ("sample-step,s", po::value<float>(&sample_step)->default_value(0.005f), "sample step")
    ("num-particle,n", po::value<int>(&n)->default_value(1), "number of particles")
    ("save-txt", po::value<bool>(&save_rslt_txt)->default_value(false), "save results in text file")
    ("save-img", po::value<bool>(&save_rslt_img)->default_value(false), "save results in image files")
    ("save-path", po::value<std::string>(&str_result_path), "save results in image files")
    
    ("display", po::value<bool>(&display)->default_value(true), "display results or not")
    ("network", po::value<bool>(&net)->default_value(false), "use network mode or not")
    ("width", po::value<int>(&width)->default_value(640), "width")
    ("height", po::value<int>(&height)->default_value(480), "height")
    ("intrinsic", po::value<std::string>(&intrinsic)->default_value("Intrinsics_normal.xml"), "intrinsic parameters")
    ("distortion", po::value<std::string>(&distortion)->default_value("Distortion_normal.xml"), "distortion parameters")
    ("dull_edge", po::value<bool>(&dull_edge)->default_value(false), "consider dull edges")
    ("th_cm", po::value<float>(&th_cm)->default_value(0.2f), "threshold of chamfer matching")
    ("init_pose", po::value<std::string>(&pose_init_str), "init pose")
    // AKAN
    ("min_keypoint_matches", po::value<int>(&min_keypoint_matches)->default_value(20), "min number of keypoint matches to start tracking")
    ("use_ach_channel", po::value<std::string>(&ach_channel)->default_value("none"), "Use specific ach channel with given name")
    ("use_tracking", po::value<bool>(&use_tracking)->default_value(true), "Enable tracking after detection of object")
  ;
  po::variables_map vm;
  po::store(po::parse_command_line(argc, argv, desc), vm);
  po::notify(vm);

  if(argc < 2 || vm.count("help"))
  {
    std::cout << desc << std::endl;
    return 1;
  }

  if(input=="ach")
	{
		//sns_init();
	}

  if(obj_name.empty())
  {
    std::cerr << "obj-name should be specified." << std::endl;
    return 1;
  }

  if(save_rslt_txt || save_rslt_img)
  {
    if(str_result_path.empty())
    {
      std::cerr << "No save-path specified when using either save-txt or save-img" << std::endl;
      return 1;
    }
  }

  if(vm.count("input"))
  {
    std::cout << "input: " << vm["input"].as<std::string>() << std::endl;
  }

  if(vm.count("obj-name"))
  {
    std::cout << "obj-name: " << vm["obj-name"].as<std::string>() << std::endl;
  }

  if(vm.count("n"))
  {
    std::cout << "n: " << vm["n"].as<int>() << std::endl;
  }

  if(vm.count("init_pose"))
  {
    std::cout << "init_pose: ";
    std::vector<std::string> tokens;
    boost::split(tokens, pose_init_str, boost::is_any_of(","), boost::token_compress_on);
        
    for(int i = 0; i < tokens.size(); i++)
      std::cout << tokens[i] << " ";
    std::cout << std::endl;

    if(tokens.size() != 16)
    {
      std::cerr << "Not enough values in pose. 16 values are required!" << std::endl;
    }
    else
    {
      // overwrite pose_init
      for (int i = 0; i < 16; i++)
        pose_init->data.fl[i] = std::atof(tokens[i].c_str());
    }
  }

  TrackerBase* tracker;
  if(tracker_name.compare("irls") == 0)
  {
    tracker = new IrlsTracker ();
    tracker->setMinKeypointMatches(min_keypoint_matches);
  }
  else if(tracker_name.compare("pf") == 0)
  {
    tracker = new TextureParticleFilterTracker ();
    ((TextureParticleFilterTracker*)tracker)->setNumParticle(n);
    ((TextureParticleFilterTracker*)tracker)->initParticleFilter();
  }
  else if(tracker_name.compare("pf_textureless") == 0)
  {
    tracker = new TexturelessParticleFilterTracker ();
    ((TexturelessParticleFilterTracker*)tracker)->setNumParticle(n);
    ((TexturelessParticleFilterTracker*)tracker)->setThresholdCM(th_cm);
    ((TexturelessParticleFilterTracker*)tracker)->initParticleFilter();
  }
  else
  {
    std::cerr << "Unknown tracker method name: " << tracker_name << std::endl;
    return 1;
  }

  tracker->setSampleStep(sample_step);
  tracker->setDisplay(display);
  tracker->setNetworkMode(net);
  tracker->setSaveResultText(save_rslt_txt);
  tracker->setSaveResultImage(save_rslt_img);
  tracker->setSaveResultPath(str_result_path);
  tracker->setConsideringDullEdges(dull_edge);
  tracker->setTracking(use_tracking);
    
  tracker->initTracker(obj_name, input, intrinsic, distortion, width, height, pose_init , ach_channel);
  tracker->run();
  delete tracker;

  return (0);
}
Example #13
void
cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
                   const CvMat* _distCoeffs,
                   const CvMat* _R, const CvMat* _P )
{
    CV_FUNCNAME( "cvUndistortPoints" );

    __BEGIN__;

    double A[3][3], RR[3][3], k[5]={0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
    CvMat _A=cvMat(3, 3, CV_64F, A), _Dk;
    CvMat _RR=cvMat(3, 3, CV_64F, RR);
    const CvPoint2D32f* srcf;
    const CvPoint2D64f* srcd;
    CvPoint2D32f* dstf;
    CvPoint2D64f* dstd;
    int stype, dtype;
    int sstep, dstep;
    int i, j, n;

    CV_ASSERT( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
        (_src->rows == 1 || _src->cols == 1) &&
        (_dst->rows == 1 || _dst->cols == 1) &&
        CV_ARE_SIZES_EQ(_src, _dst) &&
        (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
        (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));

    CV_ASSERT( CV_IS_MAT(_cameraMatrix) && CV_IS_MAT(_distCoeffs) &&
        _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 &&
        (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
        (_distCoeffs->rows*_distCoeffs->cols == 4 ||
        _distCoeffs->rows*_distCoeffs->cols == 5) );
    _Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
        CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);
    cvConvert( _cameraMatrix, &_A );
    cvConvert( _distCoeffs, &_Dk );

    if( _R )
    {
        CV_ASSERT( CV_IS_MAT(_R) && _R->rows == 3 && _R->cols == 3 );
        cvConvert( _R, &_RR );
    }
    else
        cvSetIdentity(&_RR);

    if( _P )
    {
        double PP[3][3];
        CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP);
        CV_ASSERT( CV_IS_MAT(_P) && _P->rows == 3 && (_P->cols == 3 || _P->cols == 4));
        cvConvert( cvGetCols(_P, &_P3x3, 0, 3), &_PP );
        cvMatMul( &_PP, &_RR, &_RR );
    }
    
    srcf = (const CvPoint2D32f*)_src->data.ptr;
    srcd = (const CvPoint2D64f*)_src->data.ptr;
    dstf = (CvPoint2D32f*)_dst->data.ptr;
    dstd = (CvPoint2D64f*)_dst->data.ptr;
    stype = CV_MAT_TYPE(_src->type);
    dtype = CV_MAT_TYPE(_dst->type);
    sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);
    dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);
    
    n = _src->rows + _src->cols - 1;

    fx = A[0][0];
    fy = A[1][1];
    ifx = 1./fx;
    ify = 1./fy;
    cx = A[0][2];
    cy = A[1][2];

    for( i = 0; i < n; i++ )
    {
        double x, y, x0, y0;
        if( stype == CV_32FC2 )
        {
            x = srcf[i*sstep].x;
            y = srcf[i*sstep].y;
        }
        else
        {
            x = srcd[i*sstep].x;
            y = srcd[i*sstep].y;
        }
        
        x0 = x = (x - cx)*ifx;
        y0 = y = (y - cy)*ify;

        // compensate distortion iteratively
        for( j = 0; j < 5; j++ )
        {
            double r2 = x*x + y*y;
            double icdist = 1./(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
            double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
            double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
            x = (x0 - deltaX)*icdist;
            y = (y0 - deltaY)*icdist;
        }
        
        double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2];
        double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2];
        double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]);
        x = xx*ww;
        y = yy*ww;

        if( dtype == CV_32FC2 )
        {
            dstf[i*dstep].x = (float)x;
            dstf[i*dstep].y = (float)y;
        }
        else
        {
            dstd[i*dstep].x = x;
            dstd[i*dstep].y = y;
        }
    }

    __END__;
}
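A minimal usage sketch (not from the source above), with made-up camera parameters: it maps one distorted pixel coordinate to undistorted coordinates; passing 0 for R and P leaves the result in normalized image coordinates.

/* usage sketch with made-up camera parameters */
void demo_undistort_points( void )
{
    double K[9]    = { 500, 0, 320,  0, 500, 240,  0, 0, 1 };
    double dist[5] = { -0.2, 0.05, 0, 0, 0 };
    CvMat cameraMatrix = cvMat( 3, 3, CV_64F, K );
    CvMat distCoeffs   = cvMat( 1, 5, CV_64F, dist );

    CvPoint2D32f pt_in = cvPoint2D32f( 400.f, 300.f ), pt_out;
    CvMat src = cvMat( 1, 1, CV_32FC2, &pt_in );
    CvMat dst = cvMat( 1, 1, CV_32FC2, &pt_out );

    /* R = 0, P = 0: output stays in normalized (undistorted) coordinates */
    cvUndistortPoints( &src, &dst, &cameraMatrix, &distCoeffs, 0, 0 );
}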
Example #14
void
cvInitUndistortRectifyMap( const CvMat* A, const CvMat* distCoeffs,
    const CvMat *R, const CvMat* Ar, CvArr* mapxarr, CvArr* mapyarr )
{
    CV_FUNCNAME( "cvInitUndistortMap" );

    __BEGIN__;
    
    double a[9], ar[9], r[9], ir[9], k[5]={0,0,0,0,0};
    int coi1 = 0, coi2 = 0;
    CvMat mapxstub, *_mapx = (CvMat*)mapxarr;
    CvMat mapystub, *_mapy = (CvMat*)mapyarr;
    CvMat _a = cvMat( 3, 3, CV_64F, a );
    CvMat _k = cvMat( 4, 1, CV_64F, k );
    CvMat _ar = cvMat( 3, 3, CV_64F, ar );
    CvMat _r = cvMat( 3, 3, CV_64F, r );
    CvMat _ir = cvMat( 3, 3, CV_64F, ir );
    int i, j;
    double fx, fy, u0, v0, k1, k2, k3, p1, p2;
    CvSize size;

    CV_CALL( _mapx = cvGetMat( _mapx, &mapxstub, &coi1 ));
    CV_CALL( _mapy = cvGetMat( _mapy, &mapystub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "The function does not support COI" );

    if( CV_MAT_TYPE(_mapx->type) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "Both maps must have 32fC1 type" );

    if( !CV_ARE_TYPES_EQ( _mapx, _mapy ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_ARE_SIZES_EQ( _mapx, _mapy ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( A )
    {
        if( !CV_IS_MAT(A) || A->rows != 3 || A->cols != 3  ||
            (CV_MAT_TYPE(A->type) != CV_32FC1 && CV_MAT_TYPE(A->type) != CV_64FC1) )
            CV_ERROR( CV_StsBadArg, "Intrinsic matrix must be a valid 3x3 floating-point matrix" );
        cvConvert( A, &_a );
    }
    else
        cvSetIdentity( &_a );

    if( Ar )
    {
        CvMat Ar33;
        if( !CV_IS_MAT(Ar) || Ar->rows != 3 || (Ar->cols != 3 && Ar->cols != 4) ||
            (CV_MAT_TYPE(Ar->type) != CV_32FC1 && CV_MAT_TYPE(Ar->type) != CV_64FC1) )
            CV_ERROR( CV_StsBadArg, "The new intrinsic matrix must be a valid 3x3 floating-point matrix" );
        cvGetCols( Ar, &Ar33, 0, 3 );
        cvConvert( &Ar33, &_ar );
    }
    else
        cvSetIdentity( &_ar );

    if( !CV_IS_MAT(R) || R->rows != 3 || R->cols != 3  ||
        (CV_MAT_TYPE(R->type) != CV_32FC1 && CV_MAT_TYPE(R->type) != CV_64FC1) )
        CV_ERROR( CV_StsBadArg, "Rotaion/homography matrix must be a valid 3x3 floating-point matrix" );

    if( distCoeffs )
    {
        CV_ASSERT( CV_IS_MAT(distCoeffs) &&
            (distCoeffs->rows == 1 || distCoeffs->cols == 1) &&
            (distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) == 4 ||
            distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) == 5) &&
            (CV_MAT_DEPTH(distCoeffs->type) == CV_64F ||
            CV_MAT_DEPTH(distCoeffs->type) == CV_32F) );
        _k = cvMat( distCoeffs->rows, distCoeffs->cols,
                CV_MAKETYPE(CV_64F, CV_MAT_CN(distCoeffs->type)), k );
        cvConvert( distCoeffs, &_k );
    }
    else
        cvZero( &_k );
    
    cvConvert( R, &_r );    // rectification matrix
    cvMatMul( &_ar, &_r, &_r ); // Ar*R
    cvInvert( &_r, &_ir );  // inverse: R^-1*Ar^-1

    u0 = a[2]; v0 = a[5];
    fx = a[0]; fy = a[4];
    k1 = k[0]; k2 = k[1]; k3 = k[4];
    p1 = k[2]; p2 = k[3];

    size = cvGetMatSize(_mapx);

    for( i = 0; i < size.height; i++ )
    {
        float* mapx = (float*)(_mapx->data.ptr + _mapx->step*i);
        float* mapy = (float*)(_mapy->data.ptr + _mapy->step*i);
        double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];

        for( j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] )
        {
            double w = 1./_w, x = _x*w, y = _y*w;
            double x2 = x*x, y2 = y*y;
            double r2 = x2 + y2, _2xy = 2*x*y;
            double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2;
            double u = fx*(x*kr + p1*_2xy + p2*(r2 + 2*x2)) + u0;
            double v = fy*(y*kr + p1*(r2 + 2*y2) + p2*_2xy) + v0; 
            mapx[j] = (float)u;
            mapy[j] = (float)v;
        }
    }

    __END__;
}
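A minimal usage sketch (not from the source above), with made-up camera parameters: it builds the undistortion/rectification maps for a 640x480 image using an identity rectification and then applies them with cvRemap.

/* usage sketch with made-up camera parameters */
void demo_init_undistort_rectify_map( void )
{
    double K[9]    = { 500, 0, 320,  0, 500, 240,  0, 0, 1 };
    double dist[4] = { -0.2, 0.05, 0, 0 };
    double Rd[9]   = { 1, 0, 0,  0, 1, 0,  0, 0, 1 };   /* identity rectification */
    CvMat cameraMatrix = cvMat( 3, 3, CV_64F, K );
    CvMat distCoeffs   = cvMat( 1, 4, CV_64F, dist );
    CvMat R            = cvMat( 3, 3, CV_64F, Rd );

    IplImage* src = cvCreateImage( cvSize(640, 480), IPL_DEPTH_8U, 3 );
    IplImage* dst = cvCreateImage( cvSize(640, 480), IPL_DEPTH_8U, 3 );
    CvMat* mapx = cvCreateMat( 480, 640, CV_32FC1 );
    CvMat* mapy = cvCreateMat( 480, 640, CV_32FC1 );
    cvZero( src );

    /* reuse the same camera matrix as the "new" camera matrix Ar */
    cvInitUndistortRectifyMap( &cameraMatrix, &distCoeffs, &R, &cameraMatrix, mapx, mapy );
    cvRemap( src, dst, mapx, mapy, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );

    cvReleaseMat( &mapx );  cvReleaseMat( &mapy );
    cvReleaseImage( &src ); cvReleaseImage( &dst );
}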
Example #15
void cvComputeRQDecomposition(CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ, CvMat *matrixQx, CvMat *matrixQy, CvMat *matrixQz, CvPoint3D64f *eulerAngles)
{
	
	CvMat *tmpMatrix1 = 0;
	CvMat *tmpMatrix2 = 0;
	CvMat *tmpMatrixM = 0;
	CvMat *tmpMatrixR = 0;
	CvMat *tmpMatrixQ = 0;
	CvMat *tmpMatrixQx = 0;
	CvMat *tmpMatrixQy = 0;
	CvMat *tmpMatrixQz = 0;
	double tmpEulerAngleX, tmpEulerAngleY, tmpEulerAngleZ;
	
	CV_FUNCNAME("cvRQDecomp3x3");
    __CV_BEGIN__;
	
	/* Validate parameters. */
	if(matrixM == 0 || matrixR == 0 || matrixQ == 0)
		CV_ERROR(CV_StsNullPtr, "Some of parameters is a NULL pointer!");
	
	if(!CV_IS_MAT(matrixM) || !CV_IS_MAT(matrixR) || !CV_IS_MAT(matrixQ))
		CV_ERROR(CV_StsUnsupportedFormat, "Input parameters must be a matrices!");
	
	if(matrixM->cols != 3 || matrixM->rows != 3 || matrixR->cols != 3 || matrixR->rows != 3 || matrixQ->cols != 3 || matrixQ->rows != 3)
		CV_ERROR(CV_StsUnmatchedSizes, "Size of matrices must be 3x3!");
	
	CV_CALL(tmpMatrix1 = cvCreateMat(3, 3, CV_64F));
	CV_CALL(tmpMatrix2 = cvCreateMat(3, 3, CV_64F));
	CV_CALL(tmpMatrixM = cvCreateMat(3, 3, CV_64F));
	CV_CALL(tmpMatrixR = cvCreateMat(3, 3, CV_64F));
	CV_CALL(tmpMatrixQ = cvCreateMat(3, 3, CV_64F));
	CV_CALL(tmpMatrixQx = cvCreateMat(3, 3, CV_64F));
	CV_CALL(tmpMatrixQy = cvCreateMat(3, 3, CV_64F));
	CV_CALL(tmpMatrixQz = cvCreateMat(3, 3, CV_64F));
	
	cvCopy(matrixM, tmpMatrixM);
	
	/* Find Givens rotation Q_x for x axis. */
	/*
	      ( 1  0  0 )
	 Qx = ( 0  c -s ), cos = -m33/sqrt(m32^2 + m33^2), cos = m32/sqrt(m32^2 + m33^2)
		  ( 0  s  c )
	 */
	
	double x, y, z, c, s;
	x = cvmGet(tmpMatrixM, 2, 1);
	y = cvmGet(tmpMatrixM, 2, 2);
	z = x * x + y * y;
	assert(z != 0); // Prevent division by zero.
	c = -y / sqrt(z);
	s = x / sqrt(z);
	
	cvSetIdentity(tmpMatrixQx);
	cvmSet(tmpMatrixQx, 1, 1, c);
	cvmSet(tmpMatrixQx, 1, 2, -s);
	cvmSet(tmpMatrixQx, 2, 1, s);
	cvmSet(tmpMatrixQx, 2, 2, c);
	
	tmpEulerAngleX = acos(c) * 180.0 / CV_PI;
	
	/* Multiply M on the right by Q_x. */
	
	cvMatMul(tmpMatrixM, tmpMatrixQx, tmpMatrixR);
	cvCopy(tmpMatrixR, tmpMatrixM);
	
	assert(cvmGet(tmpMatrixM, 2, 1) < CV_VERYSMALLDOUBLE && cvmGet(tmpMatrixM, 2, 1) > -CV_VERYSMALLDOUBLE); // Should actually be zero.
	
	if(cvmGet(tmpMatrixM, 2, 1) != 0.0)
		cvmSet(tmpMatrixM, 2, 1, 0.0); // Rectify arithmetic precision error.
	
	/* Find Givens rotation for y axis. */
	/*
	      ( c  0  s )
	 Qy = ( 0  1  0 ), cos = m33/sqrt(m31^2 + m33^2), sin = m31/sqrt(m31^2 + m33^2)
	      (-s  0  c )
	 */
	
	x = cvmGet(tmpMatrixM, 2, 0);
	y = cvmGet(tmpMatrixM, 2, 2);
	z = x * x + y * y;
	assert(z != 0); // Prevent division by zero.
	c = y / sqrt(z);
	s = x / sqrt(z);
	
	cvSetIdentity(tmpMatrixQy);
	cvmSet(tmpMatrixQy, 0, 0, c);
	cvmSet(tmpMatrixQy, 0, 2, s);
	cvmSet(tmpMatrixQy, 2, 0, -s);
	cvmSet(tmpMatrixQy, 2, 2, c);
	
	tmpEulerAngleY = acos(c) * 180.0 / CV_PI;
	
	/* Multiply M*Q_x on the right by Q_y. */
	
	cvMatMul(tmpMatrixM, tmpMatrixQy, tmpMatrixR);
	cvCopy(tmpMatrixR, tmpMatrixM);
	
	assert(cvmGet(tmpMatrixM, 2, 0) < CV_VERYSMALLDOUBLE && cvmGet(tmpMatrixM, 2, 0) > -CV_VERYSMALLDOUBLE); // Should actually be zero.
	
	if(cvmGet(tmpMatrixM, 2, 0) != 0.0)
		cvmSet(tmpMatrixM, 2, 0, 0.0); // Rectify arithmetic precision error.
	
	/* Find Givens rotation for z axis. */
	/*
	      ( c -s  0 )
	 Qz = ( s  c  0 ), cos = -m22/sqrt(m21^2 + m22^2), sin = m21/sqrt(m21^2 + m22^2)
	      ( 0  0  1 )
	 */
	
	x = cvmGet(tmpMatrixM, 1, 0);
	y = cvmGet(tmpMatrixM, 1, 1);
	z = x * x + y * y;
	assert(z != 0); // Prevent division by zero.
	c = -y / sqrt(z);
	s = x / sqrt(z);
	
	cvSetIdentity(tmpMatrixQz);
	cvmSet(tmpMatrixQz, 0, 0, c);
	cvmSet(tmpMatrixQz, 0, 1, -s);
	cvmSet(tmpMatrixQz, 1, 0, s);
	cvmSet(tmpMatrixQz, 1, 1, c);
	
	tmpEulerAngleZ = acos(c) * 180.0 / CV_PI;
	
	/* Multiply M*Q_x*Q_y on the right by Q_z. */
	
	cvMatMul(tmpMatrixM, tmpMatrixQz, tmpMatrixR);
	
	assert(cvmGet(tmpMatrixR, 1, 0) < CV_VERYSMALLDOUBLE && cvmGet(tmpMatrixR, 1, 0) > -CV_VERYSMALLDOUBLE); // Should actually be zero.
	
	if(cvmGet(tmpMatrixR, 1, 0) != 0.0)
		cvmSet(tmpMatrixR, 1, 0, 0.0); // Rectify arithmetic precision error.
	
	/* Calculate orthogonal matrix. */
	/*
	 Q = QzT * QyT * QxT
	 */
	
	cvTranspose(tmpMatrixQz, tmpMatrix1);
	cvTranspose(tmpMatrixQy, tmpMatrix2);
	cvMatMul(tmpMatrix1, tmpMatrix2, tmpMatrixQ);
	cvCopy(tmpMatrixQ, tmpMatrix1);
	cvTranspose(tmpMatrixQx, tmpMatrix2);
	cvMatMul(tmpMatrix1, tmpMatrix2, tmpMatrixQ);
	
	/* Solve decomposition ambiguity. */
	/*
	 Diagonal entries of R should be positive, so swap signs if necessary.
	 */
	
	if(cvmGet(tmpMatrixR, 0, 0) < 0.0) {
		cvmSet(tmpMatrixR, 0, 0, -1.0 * cvmGet(tmpMatrixR, 0, 0));
		cvmSet(tmpMatrixQ, 0, 0, -1.0 * cvmGet(tmpMatrixQ, 0, 0));
		cvmSet(tmpMatrixQ, 0, 1, -1.0 * cvmGet(tmpMatrixQ, 0, 1));
		cvmSet(tmpMatrixQ, 0, 2, -1.0 * cvmGet(tmpMatrixQ, 0, 2));
	}
	if(cvmGet(tmpMatrixR, 1, 1) < 0.0) {
		cvmSet(tmpMatrixR, 0, 1, -1.0 * cvmGet(tmpMatrixR, 0, 1));
		cvmSet(tmpMatrixR, 1, 1, -1.0 * cvmGet(tmpMatrixR, 1, 1));
		cvmSet(tmpMatrixQ, 1, 0, -1.0 * cvmGet(tmpMatrixQ, 1, 0));
		cvmSet(tmpMatrixQ, 1, 1, -1.0 * cvmGet(tmpMatrixQ, 1, 1));
		cvmSet(tmpMatrixQ, 1, 2, -1.0 * cvmGet(tmpMatrixQ, 1, 2));
	}

	/* Enforce det(Q) = 1 */ 
	if (cvDet(tmpMatrixQ) < 0) 
	{
		for (int i = 0; i < 3; i++) 
		{
			for (int j = 0; j < 3; j++) 
			{
				cvmSet(tmpMatrixQ, j, i, -cvmGet(tmpMatrixQ, j, i)); 
			}
		}
	}
	
	/* Save R and Q matrices. */
	
	cvCopy(tmpMatrixR, matrixR);
	cvCopy(tmpMatrixQ, matrixQ);
	
	if(matrixQx && CV_IS_MAT(matrixQx) && matrixQx->cols == 3 && matrixQx->rows == 3)
		cvCopy(tmpMatrixQx, matrixQx);
	if(matrixQy && CV_IS_MAT(matrixQy) && matrixQy->cols == 3 && matrixQy->rows == 3)
		cvCopy(tmpMatrixQy, matrixQy);
	if(matrixQz && CV_IS_MAT(matrixQz) && matrixQz->cols == 3 && matrixQz->rows == 3)
		cvCopy(tmpMatrixQz, matrixQz);
	
	/* Save Euler angles. */
	
	if(eulerAngles)
		*eulerAngles = cvPoint3D64f(tmpEulerAngleX, tmpEulerAngleY, tmpEulerAngleZ);
	
	__CV_END__;
	
	cvReleaseMat(&tmpMatrix1);
	cvReleaseMat(&tmpMatrix2);
	cvReleaseMat(&tmpMatrixM);
	cvReleaseMat(&tmpMatrixR);
	cvReleaseMat(&tmpMatrixQ);
	cvReleaseMat(&tmpMatrixQx);
	cvReleaseMat(&tmpMatrixQy);
	cvReleaseMat(&tmpMatrixQz);

}
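A minimal usage sketch (not from the source above): it decomposes a slightly perturbed identity matrix into an upper-triangular R and an orthogonal Q, also retrieving the per-axis Givens rotations and the Euler angles (in degrees, per the acos conversion above).

/* usage sketch with a made-up input matrix */
void demo_rq_decomposition( void )
{
    CvMat* M  = cvCreateMat( 3, 3, CV_64F );
    CvMat* R  = cvCreateMat( 3, 3, CV_64F );
    CvMat* Q  = cvCreateMat( 3, 3, CV_64F );
    CvMat* Qx = cvCreateMat( 3, 3, CV_64F );
    CvMat* Qy = cvCreateMat( 3, 3, CV_64F );
    CvMat* Qz = cvCreateMat( 3, 3, CV_64F );
    CvPoint3D64f euler;

    cvSetIdentity( M );
    cvmSet( M, 0, 1, 0.3 );   /* perturb the identity so the decomposition is non-trivial */
    cvmSet( M, 1, 2, -0.1 );

    cvComputeRQDecomposition( M, R, Q, Qx, Qy, Qz, &euler );
    /* R is now upper-triangular, Q orthogonal, and euler holds the three angles */

    cvReleaseMat( &M );  cvReleaseMat( &R );  cvReleaseMat( &Q );
    cvReleaseMat( &Qx ); cvReleaseMat( &Qy ); cvReleaseMat( &Qz );
}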