Example #1
osg::Geode* myKeyboardEventHandler::PlotEllipse3D(CvMat *X, CvMat *P, osg::Vec4Array* colors )
{
    osg::Geode* geode = new osg::Geode();
    osg::Geometry* pointsGeom = new osg::Geometry();
    osg::Vec3Array* vertices = new osg::Vec3Array;
    CvMat *temp;
    temp=cvCreateMatHeader(3,3,CV_32FC1);
    CvMat *_temp = cvCreateMat (3,3,CV_32FC1);
    CvMat* vect=cvCreateMat (3,1,CV_32FC1);
    CvMat* res6=cvCreateMat (3,1,CV_32FC1);
    //CvMat* vect2=cvCreateMat(1,6,CV_32FC1);
    //CvPoint2D64f proj ;
    //CvMat* m = cvCreateMat(3,1,CV_32FC1);
    float *fstate;
    fstate= X->data.fl;
    float xc,yc,zc;
    xc=fstate[0];
    yc=fstate[1];
    zc=fstate[2];

    cvGetSubRect( P,temp,cvRect(0,0,3,3) );

//    for (int part=0; part<100; part++){
    for(float t = 0; t<2*3.14159; t+=0.1)
        for (float p = -3.14159/2; p<3.14159;p+=0.1)
        {
        cvmSet(vect,0,0,7.8*cos(p)*cos(t));
        cvmSet(vect,1,0,7.8*cos(p)*sin(t));
        cvmSet(vect,2,0,7.8*sin(p));
        cvZero(_temp);
        pSlam->Cholesky(temp,_temp);
        cvMatMul(_temp,vect,res6);

        double ox=cvmGet(res6,0,0)+xc;
        double oy=cvmGet(res6,1,0)+yc;
        double oz=cvmGet(res6,2,0)+zc;

        if (ox> -100000 && ox<10000 &&
            oy> -100000 && oy<10000 &&
            oz> -100000 && oz<10000)
        {
            vertices->push_back(osg::Vec3(ox,oy,oz));
        }
    }

    // pass the created vertex array to the points geometry object.
    pointsGeom->setVertexArray(vertices);

    // pass the color array to the points geometry; note the binding tells the
    // geometry to use a single color for the whole object.
    pointsGeom->setColorArray(colors);
    pointsGeom->setColorBinding(osg::Geometry::BIND_OVERALL);

    // set the normal in the same way as the color.
    osg::Vec3Array* normals = new osg::Vec3Array;
    normals->push_back(osg::Vec3(0.0f,-1.0f,0.0f));
    pointsGeom->setNormalArray(normals);
    pointsGeom->setNormalBinding(osg::Geometry::BIND_OVERALL);


    // create and add a DrawArray Primitive (see include/osg/Primitive).  The first
    // parameter passed to the DrawArrays constructor is the Primitive::Mode which
    // in this case is POINTS (which has the same value GL_POINTS), the second
    // parameter is the index position into the vertex array of the first point
    // to draw, and the third parameter is the number of points to draw.
    pointsGeom->addPrimitiveSet(new osg::DrawArrays(osg::PrimitiveSet::POINTS,0,vertices->size()));


    // add the points geometry to the geode.
    geode->addDrawable(pointsGeom);
    // Create a new StateSet with default settings:
    osg::StateSet* stateOne = new osg::StateSet();
    osg::Point *  point = new osg::Point();
    point->setSize(1.0);
    stateOne->setAttributeAndModes(point,osg::StateAttribute::ON);
    geode->setStateSet(stateOne);

    // release the temporary OpenCV matrices before returning
    cvReleaseMat(&temp);
    cvReleaseMat(&_temp);
    cvReleaseMat(&vect);
    cvReleaseMat(&res6);
    return geode;

}
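PlotEllipse3D leans on the project-specific pSlam->Cholesky helper for the matrix square root of the 3x3 covariance block. Below is a minimal standalone sketch of the same sampling idea, with cvSVD standing in for that helper (for a symmetric positive-semidefinite P, P = U*D*U^T, so L = U*sqrt(D) gives L*L^T = P). covSqrt3x3 and ellipsoidPoint are illustrative names, not part of the original source; the 7.8 scale factor is carried over from the loops above.

#include <cv.h>
#include <math.h>

/* Square root of a symmetric positive-semidefinite 3x3 matrix:
   P = U*D*U^T for such a P, so L = U*sqrt(D) satisfies L*L^T = P. */
static void covSqrt3x3(const CvMat* P, CvMat* L)
{
    CvMat* U = cvCreateMat(3, 3, CV_32FC1);
    CvMat* D = cvCreateMat(3, 3, CV_32FC1);
    cvSVD((CvMat*)P, D, U, NULL, 0);   // P is not modified without CV_SVD_MODIFY_A
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            cvmSet(L, i, j, cvmGet(U, i, j) * sqrt(cvmGet(D, j, j)));
    cvReleaseMat(&U);
    cvReleaseMat(&D);
}

/* Map one unit-sphere direction (t, p) onto the covariance ellipsoid
   centred at (xc, yc, zc), as the nested loops above do. */
static CvPoint3D64f ellipsoidPoint(const CvMat* L, double xc, double yc,
                                   double zc, double t, double p)
{
    CvMat* u = cvCreateMat(3, 1, CV_32FC1);
    CvMat* r = cvCreateMat(3, 1, CV_32FC1);
    cvmSet(u, 0, 0, 7.8 * cos(p) * cos(t));
    cvmSet(u, 1, 0, 7.8 * cos(p) * sin(t));
    cvmSet(u, 2, 0, 7.8 * sin(p));
    cvMatMul(L, u, r);
    CvPoint3D64f out = cvPoint3D64f(cvmGet(r, 0, 0) + xc,
                                    cvmGet(r, 1, 0) + yc,
                                    cvmGet(r, 2, 0) + zc);
    cvReleaseMat(&u);
    cvReleaseMat(&r);
    return out;
}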
Example #2
KalmanPoint *NewKalman(PointSeq *Point_New, int ID, int frame_count, int contourArea) {
	const float A[] = { 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1 };
	const float H[] = { 1, 0, 0, 0, 0, 1, 0, 0 };

	KalmanPoint *Kalmanfilter;
	Kalmanfilter = (KalmanPoint *) malloc(sizeof(KalmanPoint));
	CvKalman *Kalman = cvCreateKalman(4, 2, 0);

	float measure[2] =
			{ (float) Point_New->Point.x, (float) Point_New->Point.y };
	CvMat measurement = cvMat(2, 1, CV_32FC1, measure);

	// initialize the Kalman filter
	memcpy(Kalman->transition_matrix->data.fl, A, sizeof(A));
	memcpy(Kalman->measurement_matrix->data.fl, H, sizeof(H));
	cvSetIdentity(Kalman->error_cov_post, cvRealScalar(10));
	cvSetIdentity(Kalman->process_noise_cov, cvRealScalar(1e-5));
	cvSetIdentity(Kalman->measurement_noise_cov, cvRealScalar(1e-1));

	// for the first time, use the measured point as the state_post
	cvZero(Kalman->state_post);
	cvmSet(Kalman->state_post, 0, 0, Point_New->Point.x);
	cvmSet(Kalman->state_post, 1, 0, Point_New->Point.y);

	// use the Kalman filter to predict the position of the centroid
	const CvMat *prediction = cvKalmanPredict(Kalman, 0);
	// use the measurement to correct the position
	const CvMat *correction = cvKalmanCorrect(Kalman, &measurement);

	/*
	 // test code
	 printf("measurement %f,%f\n",cvmGet(&measurement,0,0),cvmGet(&measurement, 1, 0));
	 printf("prediction %f,%f\n",cvmGet(prediction, 0, 0),cvmGet(prediction, 1, 0));
	 printf("correction %f,%f\ndelta %f,%f\n",cvmGet(correction, 0, 0),cvmGet(correction, 1, 0),
	 cvmGet(correction, 2, 0), cvmGet(correction, 3, 0));
	 */
	Kalmanfilter->Point_now = Point_New->Point;
	Kalmanfilter->Point_pre = Point_New->Point;
	Kalmanfilter->firstPoint = Point_New->Point;
	Kalmanfilter->firstFrame = frame_count; //added 25 January 2016: speed measurement
	Kalmanfilter->lastFrame = frame_count; //added 25 January 2016: speed measurement
	Kalmanfilter->Kalman = Kalman;
	Kalmanfilter->ID = Point_New->ID = ID;
	Kalmanfilter->contourArea=contourArea;
	/*
	//value 1 = car; value 2 = truck; value 3 = undefined
	if (Kalmanfilter->contourArea >= 4800
			&& Kalmanfilter->contourArea <= 19900) {
		Kalmanfilter->jenis = 1;

	} else if (Kalmanfilter->contourArea > 19900) {
		Kalmanfilter->jenis = 2;

	} else {
		Kalmanfilter->jenis = 3;
	}
	*/
	//value 1 = motorcycle; value 2 = car; value 3 = medium truck; value 4 = large truck; value 0 = undefined;
	if (Kalmanfilter->contourArea >= 4000
			&& Kalmanfilter->contourArea <= 7000) {
		Kalmanfilter->jenis = 2;
	} else if (Kalmanfilter->contourArea > 7000
			&& Kalmanfilter->contourArea <= 12000) {
		Kalmanfilter->jenis = 3;
	} else if (Kalmanfilter->contourArea > 12000) {
		Kalmanfilter->jenis = 4;
	} else {
		Kalmanfilter->jenis = 0;
	}
	//Kalmanfilter->jenis = 0; //added 26 January 2016
	Kalmanfilter->Loss = 0;

	return Kalmanfilter;
}
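NewKalman runs a single predict/correct pair at construction time. Below is a hypothetical per-frame update helper, sketched under the assumption that the KalmanPoint fields shown above (Kalman, Point_now, Point_pre, lastFrame, Loss) are CvPoint/int members; UpdateKalman is an illustrative name, not taken from the original source.

/* Sketch: feed a new centroid measurement to an existing KalmanPoint. */
void UpdateKalman(KalmanPoint *kp, CvPoint observed, int frame_count) {
	float measure[2] = { (float) observed.x, (float) observed.y };
	CvMat measurement = cvMat(2, 1, CV_32FC1, measure);

	cvKalmanPredict(kp->Kalman, 0);                  // time update
	const CvMat *corrected = cvKalmanCorrect(kp->Kalman, &measurement);

	kp->Point_pre = kp->Point_now;                   // keep one step of history
	kp->Point_now = cvPoint((int) cvmGet(corrected, 0, 0),
			(int) cvmGet(corrected, 1, 0));
	kp->lastFrame = frame_count;                     // for speed measurement
	kp->Loss = 0;                                    // a measurement was seen this frame
}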
Example #3
//============================================================================
void AAM_Basic::EstCParamGradientMatrix(CvMat* GParam,
		const CvMat* CParams,
		const std::vector<AAM_Shape>& AllShapes, 
		const std::vector<IplImage*>& AllImages,
		const CvMat* vCDisps)
{
	int nExperiment = 0;
	int ntotalExp = AllShapes.size()*vCDisps->rows/2*__cam.nModes();
    int np = __cam.nModes();
	int npixels = __cam.__texture.nPixels();
	int npointsby2 = __cam.__shape.nPoints()*2;
	CvMat c;											//appearance parameters
    CvMat* c1 = cvCreateMat(1, np, CV_64FC1);
    CvMat* c2 = cvCreateMat(1, np, CV_64FC1);
	CvMat* s1 = cvCreateMat(1, npointsby2, CV_64FC1);	//shape vector
    CvMat* s2 = cvCreateMat(1, npointsby2, CV_64FC1);
    CvMat* t1 = cvCreateMat(1, npixels, CV_64FC1);		//texture vector
    CvMat* t2 = cvCreateMat(1, npixels, CV_64FC1);
	CvMat* delta_g1 = cvCreateMat(1, npixels, CV_64FC1);
	CvMat* delta_g2 = cvCreateMat(1, npixels, CV_64FC1);
	CvMat* cDiff = cvCreateMat(1, npixels, CV_64FC1);
    AAM_Shape aam_s1, aam_s2;
	std::vector<double> normFactors; normFactors.resize(np);

	// for each training example in the training set
	for(int i = 0; i < AllShapes.size(); i++)
	{
		cvGetRow(CParams, &c, i);

		for(int j = 0; j < vCDisps->rows; j+=2)
		{
			for(int k = 0; k < vCDisps->cols; k++)
			{
				printf("AllShapes[%d]: Performing (%d/%d)\r", i, nExperiment++, ntotalExp);

				//shift current appearance parameters 
				cvCopy(&c, c1);
                cvCopy(&c, c2);
				cvmSet(c1, 0, k, cvmGet(&c,0,k)+cvmGet(vCDisps,j,k));
				cvmSet(c2, 0, k, cvmGet(&c,0,k)+cvmGet(vCDisps,j+1,k));

				//generate model texture
				__cam.CalcTexture(t1, c1);
				__cam.CalcTexture(t2, c2);

				//generate model shape
				__cam.CalcLocalShape(s1, c1); aam_s1.Mat2Point(s1);
				__cam.CalcLocalShape(s2, c2); aam_s2.Mat2Point(s2);

				//align model shape to annotated shape
				aam_s1.AlignTo(AllShapes[i]); aam_s1.Point2Mat(s1);
				aam_s2.AlignTo(AllShapes[i]); aam_s2.Point2Mat(s2);

				//sample the shape to get warped texture
				__cam.__paw.FasterGetWarpTextureFromMatShape(s1, AllImages[i], delta_g1, true);
				__cam.__texture.AlignTextureToRef(__cam.__MeanG, delta_g1);
				
				__cam.__paw.FasterGetWarpTextureFromMatShape(s2, AllImages[i], delta_g2, true);
				__cam.__texture.AlignTextureToRef(__cam.__MeanG, delta_g2);
				
				//calculate pixel difference (g_s - g_m)
				cvSub(delta_g1, t1, delta_g1);
				cvSub(delta_g2, t2, delta_g2);
				
				//form central displacement
				cvSub(delta_g2, delta_g1, cDiff);
				cvConvertScale(cDiff, cDiff, 1.0/(cvmGet(vCDisps,j+1,k)-cvmGet(vCDisps,j,k)));

				//accumulate into k-th row
				CvMat Gk; cvGetRow(GParam, &Gk, k);
				cvAdd(&Gk, cDiff, &Gk);

				//increment normalisation factor
				normFactors[k] = normFactors[k]+1;
			}
		}
	}

	//normalize
	for(int j = 0; j < np; j++)
	{
		CvMat Gj; cvGetRow(GParam, &Gj, j);
		cvConvertScale(&Gj, &Gj, 1.0/normFactors[j]);
	}

	cvReleaseMat(&c1); cvReleaseMat(&c2);
	cvReleaseMat(&s1); cvReleaseMat(&s2);
	cvReleaseMat(&t1); cvReleaseMat(&t2);
	cvReleaseMat(&delta_g1); cvReleaseMat(&delta_g2);
	cvReleaseMat(&cDiff);
}
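The inner loops above implement a central-difference gradient estimate: rows j and j+1 of vCDisps hold paired displacements of the same parameter c_k, and the texture-residual difference is divided by their separation. The scalar analogue, for illustration only:

/* f'(c) ~ (f(c+d) - f(c-d)) / (2d), the same scheme as
   (delta_g2 - delta_g1) / (cvmGet(vCDisps,j+1,k) - cvmGet(vCDisps,j,k)). */
static double centralDifference(double (*f)(double), double c, double d)
{
	return (f(c + d) - f(c - d)) / (2.0 * d);
}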
Example #4
CvMat* AbstractCamera::computeRtMatrix(double a, double b, double g, double tX, double tY, double tZ) {

	//--- Represent 3d rotation with euler angles

	double sinG = sin(g);
	double cosG = cos(g);
	CvMat* rZg = cvCreateMat(3, 3, CV_32FC1);
	cvSetZero(rZg);
	cvmSet(rZg, 0, 0, cosG);
	cvmSet(rZg, 0, 1, -sinG);
	cvmSet(rZg, 1, 0, sinG);
	cvmSet(rZg, 1, 1, cosG);
	cvmSet(rZg, 2, 2, 1.0f);

	double sinB = sin(b);
	double cosB = cos(b);
	CvMat* rXb = cvCreateMat(3, 3, CV_32FC1);
	cvSetZero(rXb);
	cvmSet(rXb, 0, 0, 1.0f);
	cvmSet(rXb, 1, 1, cosB);
	cvmSet(rXb, 1, 2, -sinB);
	cvmSet(rXb, 2, 1, sinB);
	cvmSet(rXb, 2, 2, cosB);

	double sinA = sin(a);
	double cosA = cos(a);
	CvMat* rZa = cvCreateMat(3, 3, CV_32FC1);
	cvSetZero(rZa);
	cvmSet(rZa, 0, 0, cosA);
	cvmSet(rZa, 0, 1, -sinA);
	cvmSet(rZa, 1, 0, sinA);
	cvmSet(rZa, 1, 1, cosA);
	cvmSet(rZa, 2, 2, 1.0f);

	CvMat* rZgXb = cvCreateMat(3, 3, CV_32FC1);
	cvMatMul(rZg, rXb, rZgXb);
	CvMat* rotationMatrix = cvCreateMat(3, 3, CV_32FC1);
	cvMatMul(rZgXb, rZa, rotationMatrix);	// cvMatMul cannot write its result in place
	cvReleaseMat(&rZgXb);

	cvReleaseMat(&rZg);
	cvReleaseMat(&rXb);
	cvReleaseMat(&rZa);

	//--- [R|T] ; First camera rotation and translation matrix
	CvMat* rtMatrix = cvCreateMat(3, 4, CV_32FC1);
	for (int i = 0; i < 3; i++) {
		cvmSet(rtMatrix, i, 0, cvmGet(rotationMatrix, i, 0));
		cvmSet(rtMatrix, i, 1, cvmGet(rotationMatrix, i, 1));
		cvmSet(rtMatrix, i, 2, cvmGet(rotationMatrix, i, 2));
	}
	cvmSet(rtMatrix, 0, 3, tX);
	cvmSet(rtMatrix, 1, 3, tY);
	cvmSet(rtMatrix, 2, 3, tZ);

	cvReleaseMat(&rotationMatrix);

	return rtMatrix;
}
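A minimal usage sketch for the returned 3x4 [R|T]: multiplying a homogeneous world point into camera coordinates. The camera pointer and all numeric values here are placeholders.

CvMat* rt = camera->computeRtMatrix(0.1, 0.2, 0.3, 10.0, 0.0, 50.0);

CvMat* worldPt = cvCreateMat(4, 1, CV_32FC1);	// homogeneous world point
cvmSet(worldPt, 0, 0, 1.0);
cvmSet(worldPt, 1, 0, 2.0);
cvmSet(worldPt, 2, 0, 3.0);
cvmSet(worldPt, 3, 0, 1.0);

CvMat* camPt = cvCreateMat(3, 1, CV_32FC1);	// same point in the camera frame
cvMatMul(rt, worldPt, camPt);			// (3x4)*(4x1) -> (3x1)

cvReleaseMat(&rt);
cvReleaseMat(&worldPt);
cvReleaseMat(&camPt);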
Example #5
/*
Calculates a planar homography from point correspondences using the direct
linear transform.  Intended for use as a ransac_xform_fn.
  
@param pts array of points
@param mpts array of corresponding points; each pts[i], i=0..n-1,
  corresponds to mpts[i]
@param n number of points in both pts and mpts; must be at least 4
  
@return Returns the 3x3 planar homography matrix that transforms points
  in pts to their corresponding points in mpts or NULL if fewer than 4
  correspondences were provided
*/
CvMat* dlt_homog( CvPoint2D64f* pts, CvPoint2D64f* mpts, int n )
{
	CvMat* H, * A, * VT, * D, h, v9;
	double _h[9];
	int i;

	if( n < 4 )
	    return NULL;

	/* set up matrices so we can unstack homography into h; Ah = 0 */
	A = cvCreateMat( 2*n, 9, CV_64FC1 );
	cvZero( A );
	for( i = 0; i < n; i++ )
	{
		cvmSet( A, 2*i, 3, -pts[i].x );
		cvmSet( A, 2*i, 4, -pts[i].y );
		cvmSet( A, 2*i, 5, -1.0  );
		cvmSet( A, 2*i, 6, mpts[i].y * pts[i].x );
		cvmSet( A, 2*i, 7, mpts[i].y * pts[i].y );
		cvmSet( A, 2*i, 8, mpts[i].y );
		cvmSet( A, 2*i+1, 0, pts[i].x );
		cvmSet( A, 2*i+1, 1, pts[i].y );
		cvmSet( A, 2*i+1, 2, 1.0  );
		cvmSet( A, 2*i+1, 6, -mpts[i].x * pts[i].x );
		cvmSet( A, 2*i+1, 7, -mpts[i].x * pts[i].y );
		cvmSet( A, 2*i+1, 8, -mpts[i].x );
	}
	D = cvCreateMat( 9, 9, CV_64FC1 );
	VT = cvCreateMat( 9, 9, CV_64FC1 );
	cvSVD( A, D, NULL, VT, CV_SVD_MODIFY_A + CV_SVD_V_T );
	v9 = cvMat( 1, 9, CV_64FC1, NULL );
	cvGetRow( VT, &v9, 8 );
	h = cvMat( 1, 9, CV_64FC1, _h );
	cvCopy( &v9, &h, NULL );
	h = cvMat( 3, 3, CV_64FC1, _h );
	H = cvCreateMat( 3, 3, CV_64FC1 );
	cvConvert( &h, H );

	cvReleaseMat( &A );
	cvReleaseMat( &D );
	cvReleaseMat( &VT );
	return H;
}
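A usage sketch for dlt_homog with the minimum four correspondences, followed by pushing a point through the recovered H (dividing by the homogeneous coordinate). The coordinate values are placeholders.

CvPoint2D64f src[4] = { {0,0}, {1,0}, {1,1}, {0,1} };
CvPoint2D64f dst[4] = { {2,1}, {4,1}, {4,3}, {2,3} };
CvMat* H = dlt_homog( src, dst, 4 );
if( H )
{
	CvPoint2D64f p = src[0];
	double w  =  cvmGet(H,2,0)*p.x + cvmGet(H,2,1)*p.y + cvmGet(H,2,2);
	double px = (cvmGet(H,0,0)*p.x + cvmGet(H,0,1)*p.y + cvmGet(H,0,2)) / w;
	double py = (cvmGet(H,1,0)*p.x + cvmGet(H,1,1)*p.y + cvmGet(H,1,2)) / w;
	printf( "(%g, %g) -> (%g, %g)\n", p.x, p.y, px, py );
	cvReleaseMat( &H );
}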
Example #6
	bool BazARTracker::init(int xsize, int ysize, 
			const std::string& bazar_config_name,
			const std::string& camera_name)
	{
		
		/*OSGART*/
		ARParam  wparam;
	    // Set the initial camera parameters.
		cparamName = camera_name;
	    if(arParamLoad((char*)cparamName.c_str(), 1, &wparam) < 0) {
			std::cerr << "ERROR: Camera parameter load error." << std::endl;
			return false;
	    }
	    arParamChangeSize(&wparam, xsize, ysize,&(m_cparam->cparam));

		arInitCparam(&(m_cparam->cparam));
	    arParamDisp(&(m_cparam->cparam));
		

	/*BAZAR*/
		// load BazAR's configuration files and detector parameters
		if (!loadBazARConfig(bazar_config_name, &bazconf)) exit(0);
		// load BazAR's camera calibration file
		if (!loadBazARCamParams((char*)(bazconf.camCalFileName),&bazconf)) exit(0);

		// init bazar tracker
		matCameraRT4_4 = cvCreateMat(4, 4, CV_64F); //64 bit double precision float
		g_matIntrinsic   = cvCreateMat(3, 3, CV_64F);
		
		// output windows
		if (getDebugMode()){
			cvNamedWindow("Gray", CV_WINDOW_AUTOSIZE);
			cvNamedWindow("Result_BAZAR", CV_WINDOW_AUTOSIZE);
		}
		// fine tuning for accuracy - careful!!
		detector.ransac_dist_threshold = (float)bazconf.ransac_dist_threshold;
		detector.max_ransac_iterations = (float)bazconf.max_ransac_iterations;
		detector.non_linear_refine_threshold = (float)bazconf.non_linear_refine_threshold;
		detector.match_score_threshold = (float)bazconf.match_score_threshold;	// A lower threshold will allow detection in harder conditions, but
																				// might lead to false positives
		// Train or load classifier
		if(!detector.build_with_cache(
			// hse25: no hard coded stuff :) -- jaja
			(char*)(bazconf.modelFileName),	// model image file name
			400,               // maximum number of keypoints on the model
			32,                // patch size in pixels
			3,                 // yape radius. Use 3,5 or 7.
			16,                // number of trees for the classifier. Somewhere between 12-50
			3                  // number of levels in the gaussian pyramid
			))
		{
			cerr << "BazARTracker: Unable to load the model image " << (char*)(bazconf.modelFileName) << " or its classifier.\n";
			return false;
		}
		 
		// set camera parameters for BAZAR
		char	*camCal = (char*)(bazconf.camCalFileName);
		char	*camExt = (char*)(bazconf.camExtFileName);
		if(!augment.LoadOptimalStructureFromFile(camCal, camExt))
		{
			std::cerr << "BazARTracker: couldn't load camera parameters: " << camCal << " " << camExt << std::endl;
			return false;
		}
		
		// image buffers needed for detection, conversion..
		image     = cvCreateImage(cvSize(xsize, ysize), IPL_DEPTH_8U, 4); // captured image
		gray      = cvCreateImage(cvSize(xsize, ysize), IPL_DEPTH_8U, 1); // detector input
		display   = cvCreateImage(cvSize(xsize, ysize), IPL_DEPTH_8U, 4); // debug
		
		// use bazar's camera calibration 
		m_cparam->cparam.mat[0][0] = bazconf.camCalMatrix[0][0];	m_cparam->cparam.mat[0][1] = bazconf.camCalMatrix[0][1];    m_cparam->cparam.mat[0][2] = bazconf.camCalMatrix[0][2];
		m_cparam->cparam.mat[1][0] = bazconf.camCalMatrix[1][0];	m_cparam->cparam.mat[1][1] = bazconf.camCalMatrix[1][1];    m_cparam->cparam.mat[1][2] = bazconf.camCalMatrix[1][2];
		m_cparam->cparam.mat[2][0] = bazconf.camCalMatrix[2][0];	m_cparam->cparam.mat[2][1] = bazconf.camCalMatrix[2][1];    m_cparam->cparam.mat[2][2] = bazconf.camCalMatrix[2][2];
		
		for(int i=0; i<3; i++)
		{
			for(int j=0; j<3; j++)
			{
				cvmSet(g_matIntrinsic, i, j, m_cparam->cparam.mat[i][j]);
			}
		}
	// end BAZAR
	  
		setProjection(10.0f, 8000.0f);
		setDebugMode(m_debugmode);
		
		setupMarkers();

		// Success
		return true;
	}
Example #7
IplImage* FacePredict::predict(const AAM_Shape& Shape, const IplImage& curImage,
					const AAM_Shape& ShapeF, const IplImage& ImageF, double RatioF,
					const AAM_Shape& ShapeM, const IplImage& ImageM, double RatioM,
					int curAgeG, int newAgeG, bool save)
{
	if (newAgeG > NGROUPS || curAgeG > NGROUPS)
	{
		fprintf(stderr, "ERROE(%s, %d): Age group larger than %d\n",
			__FILE__, __LINE__, NGROUPS);
		exit(0);
	}

	if(curImage.nChannels != 3 || curImage.depth != 8)
	{
		fprintf(stderr, "ERROR(%s: %d): The image channels must be 3, "
			"and the depth must be 8!\n", __FILE__, __LINE__);
		exit(0);
	}


	/*get the current shape parameters*/
	AAM_Shape curShape = Shape;
	curShape.Centralize();
	double thisfacewidth = curShape.GetWidth();
	if(stdwidth < thisfacewidth) 
		curShape.Scale(stdwidth / thisfacewidth);
	curShape.AlignTo(__AAMRefShape);
	
	CvMat* p = cvCreateMat(1, __nShapeModes, CV_64FC1);
	CvMat* pq = cvCreateMat(1, 4+__nShapeModes, CV_64FC1);
	__shape.CalcParams(curShape, pq);
	cvGetCols(pq, p, 4, 4+__nShapeModes);

	/*get the current texture parameters*/
	CvMat* curTexture = cvCreateMat(1, __paw.nPix() * 3, CV_64FC1);
	__paw.FasterGetWarpTextureFromShape(Shape, &curImage, curTexture, false);
	__texture.AlignTextureToRef(__MeanT, curTexture);
	CvMat* lamda = cvCreateMat(1, __nTextureModes, CV_64FC1);
	__texture.CalcParams(curTexture, lamda);


	//father
	CvMat* pF = cvCreateMat(1, __nShapeModes, CV_64FC1);
	CvMat* lamdaF = cvCreateMat(1, __nTextureModes, CV_64FC1);
	if (RatioF == 0) {
		cvZero(pF);
		cvZero(lamdaF);
	}
	else {
		AAM_Shape shapeF = ShapeF;
		shapeF.Centralize();
		thisfacewidth = ShapeF.GetWidth();
		if(stdwidth < thisfacewidth) 
			shapeF.Scale(stdwidth / thisfacewidth);
		shapeF.AlignTo(__AAMRefShape);
		CvMat* pqF = cvCreateMat(1, 4+__nShapeModes, CV_64FC1);
		__shape.CalcParams(shapeF, pqF);
		cvGetCols(pqF, pF, 4, 4+__nShapeModes);

		CvMat* TextureF = cvCreateMat(1, __paw.nPix() * 3, CV_64FC1);
		__paw.FasterGetWarpTextureFromShape(ShapeF, &ImageF, TextureF, false);
		__texture.AlignTextureToRef(__MeanT, TextureF);
		__texture.CalcParams(TextureF, lamdaF);
	}


	//mother
	CvMat* pM = cvCreateMat(1, __nShapeModes, CV_64FC1);
	CvMat* lamdaM = cvCreateMat(1, __nTextureModes, CV_64FC1);
	if (RatioM == 0) {
		cvZero(pM);
		cvZero(lamdaM);
	}
	else {
		AAM_Shape shapeM = ShapeM;
		shapeM.Centralize();
		thisfacewidth = ShapeM.GetWidth();
		if(stdwidth < thisfacewidth) 
			shapeM.Scale(stdwidth / thisfacewidth);
		shapeM.AlignTo(__AAMRefShape);
		CvMat* pqM = cvCreateMat(1, 4+__nShapeModes, CV_64FC1);
		__shape.CalcParams(shapeM, pqM);
		cvGetCols(pqM, pM, 4, 4+__nShapeModes);

		CvMat* TextureM = cvCreateMat(1, __paw.nPix() * 3, CV_64FC1);
		__paw.FasterGetWarpTextureFromShape(ShapeM, &ImageM, TextureM, false);
		__texture.AlignTextureToRef(__MeanT, TextureM);
		__texture.CalcParams(TextureM, lamdaM);
	}

	/*calculate new shape and texture parameters*/
	CvMat newShapeParams;
	CvMat* newpq = cvCreateMat(1, 4+__nShapeModes, CV_64FC1);
	cvmSet(newpq, 0, 0, cvmGet(pq, 0, 0));	cvmSet(newpq, 0, 1, cvmGet(pq, 0, 1));
	cvmSet(newpq, 0, 2, cvmGet(pq, 0, 2));	cvmSet(newpq, 0, 3, cvmGet(pq, 0, 3));
	cvGetCols(newpq, &newShapeParams, 4, 4+__nShapeModes);
	CvMat* newSP = cvCreateMat(1, __nShapeModes, CV_64FC1);
	FacePredict::CalcNewShapeParams(p, newSP, curAgeG, newAgeG);
	FacePredict::CalcParamsByRatio(newSP, pF, RatioF, pM, RatioM, &newShapeParams);

	CvMat* newTP = cvCreateMat(1, __nTextureModes, CV_64FC1);
	FacePredict::CalcNewTextureParams(lamda, newTP, curAgeG, newAgeG);
	CvMat* newTextureParams = cvCreateMat(1, __nTextureModes, CV_64FC1);
	FacePredict::CalcParamsByRatio(newTP, lamdaF, RatioF, lamdaM, RatioM, newTextureParams);

	/*calculate the new shape and texture*/
	AAM_Shape newShape;
	__shape.CalcShape(newpq, newShape);

	CvMat* newTexture = cvCreateMat(1, __paw.nPix() * 3, CV_64FC1);
	__texture.CalcTexture(newTextureParams, newTexture);

	/*synthesize the shape and texture*/
	
	IplImage* newImage = cvCreateImage(cvSize(stdwidth, stdwidth / newShape.GetWidth() * newShape.GetHeight()), IPL_DEPTH_8U, 3);
	FacePredict::FaceSynthesis(newShape, newTexture, newImage);

	if(save)
		cvSaveImage("facial prediction.jpg", newImage);

	cvReleaseMat(&p);
	cvReleaseMat(&pq);
	cvReleaseMat(&curTexture);
	cvReleaseMat(&lamda);
	cvReleaseMat(&newTextureParams);
	cvReleaseMat(&newpq);
	cvReleaseMat(&newTexture);

	return newImage;
}
Example #8
void setviewfromhomography(CvMat* homo)
{
	CvMat* intrinsic = cvCreateMat(3,3, MAT_TYPE);
	cvZero(intrinsic);

	double fx = 531.398682;
	double fy = 531.806702;
	double cx = 308.162262;
	double cy = 231.762756;

	cvmSet(intrinsic, 0,0, fx);
	cvmSet(intrinsic, 1,1, fy);
	cvmSet(intrinsic, 0,2, cx);
	cvmSet(intrinsic, 1,2, cy);
	cvmSet(intrinsic, 2,2, 1);			
	
	CvMat* inv =  cvCreateMat(3,3, MAT_TYPE);
	cvInvert(intrinsic, inv);
	
	CvMat* rdt0 =  cvCreateMat(3,3, MAT_TYPE);

	cvMatMul(inv, homo, rdt0);
	
	CvMat* rdt =  cvCreateMat(3,3, MAT_TYPE);
	
	double dvd = sqrt(cvmGet(rdt0,0, 0)*cvmGet(rdt0,0, 0)+cvmGet(rdt0,1, 0)*cvmGet(rdt0,1, 0)+cvmGet(rdt0,2, 0)*cvmGet(rdt0,2, 0));
	if (dvd==0)
		return;

	double norm = (double) ((double) 1.0/dvd);

	for(int i=0;i<3;i++){
		for(int j=0;j<3;j++){
			cvmSet(rdt, i,j, cvmGet(rdt0,i,j)*norm);
		}
	}
	
	CvMat *r1 = cvCreateMat(3,1, MAT_TYPE);
	CvMat *r2 = cvCreateMat(3,1, MAT_TYPE);
	CvMat *r1xr2 = cvCreateMat(3,1, MAT_TYPE);	
	
	for (int i=0;i<3;i++)
	{
		cvmSet(r1, i, 0, cvmGet(rdt,i, 0));
		cvmSet(r2, i, 0, cvmGet(rdt,i, 1));
	}
	
	cvCrossProduct(r1, r2, r1xr2);
	
	CvMat* R = cvCreateMat(3,3, MAT_TYPE);
	CvMat* t = cvCreateMat(3,1, MAT_TYPE);

	cvZero(R);
	cvZero(t);

	for(int y=0;y<2;y++){
		for(int x=0;x<3;x++){
			cvmSet(R, x,y, cvmGet(rdt,x,y));
		}
	}

	for(int x=0;x<3;x++){
		cvmSet(R, x,2,-cvmGet(r1xr2,x,0));
		cvmSet(t, x,0,cvmGet(rdt,x,2));	// t is 3x1, so index as (row, 0)
	}
	
	setview(R, t, view);
	
	cvReleaseMat(&R);
	cvReleaseMat(&t);
	cvReleaseMat(&r1);
	cvReleaseMat(&r2);
	cvReleaseMat(&rdt);
	cvReleaseMat(&r1xr2);
	cvReleaseMat(&rdt0);
	cvReleaseMat(&inv);
	cvReleaseMat(&intrinsic);
}
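setviewfromhomography recovers [r1 r2 t] from K^-1 * H with unit-norm scaling; a quick consistency check is to rebuild K * [r1 r2 t] and compare it to the input homography up to scale. The sketch below would have to run inside the function, before R, t and intrinsic are released; rrt and Hrebuilt are illustrative names.

	/* Sanity-check sketch: H should equal K * [r1 r2 t] up to scale. */
	CvMat* rrt = cvCreateMat(3,3, MAT_TYPE);
	for (int i=0;i<3;i++)
	{
		cvmSet(rrt, i, 0, cvmGet(R, i, 0));	// r1
		cvmSet(rrt, i, 1, cvmGet(R, i, 1));	// r2
		cvmSet(rrt, i, 2, cvmGet(t, i, 0));	// t
	}
	CvMat* Hrebuilt = cvCreateMat(3,3, MAT_TYPE);
	cvMatMul(intrinsic, rrt, Hrebuilt);	// compare element ratios against homo
	cvReleaseMat(&rrt);
	cvReleaseMat(&Hrebuilt);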
Example #9
/* Convert a MATLAB mxArray to a CvMat. MATLAB stores data column-major,
   so dims are (nrows, ncols) while OpenCV wants (width, height); the copy
   loop below transposes accordingly. */
template <typename T>
static CvMat* mxarray_to_cvmat(const mxArray* p_array)
{
  CvSize size;                                     // (ncols, nrows)
  const mwSize ndims = mxGetNumberOfDimensions(p_array);
  const mwSize* dims = mxGetDimensions(p_array);
  int channels = 0;

  //SHOULD ACCEPT ONLY MONO CHANNEL NOW

  if (ndims == 3)
    channels = static_cast<int>(dims[2]);          // third dimension holds channels
  else
    channels = 1;

  size.height = static_cast<int>(dims[0]);
  size.width  = static_cast<int>(dims[1]);

  int depth = 0;
  // CV_<bit_depth>(S|U|F)C<number_of_channels>

  //assign correct type id
  switch (matlab::traits<T>::tag)
  {
  case mxUINT8_CLASS:
    depth = CV_8UC1;
    break;

  case mxSINGLE_CLASS:
    depth = CV_32FC1;
    break;

  case mxINT16_CLASS:
    depth = CV_16SC1;
    break;

  case mxUINT16_CLASS:
    depth =  CV_16UC1;
    break;

  case mxDOUBLE_CLASS:
    depth = CV_64FC1;
    break;

  default:
    depth = CV_64FC1;
    break;
  }

  CvMat* p_mat = cvCreateMat( size.height, size.width,  depth);

  //get pointer to array/image data
  matlab::traits<T>::ptr p_data = 
	  static_cast<matlab::traits<T>::ptr >(mxGetData(p_array));

  // cycle to convert, element by element (column-major -> row-major)
  const size_t n_pixel = size.height*size.width;
  const size_t channel_offset = n_pixel;           // one full plane per channel

  for (int row_iter = 0; row_iter < size.height; row_iter++)
  {
    for (int col_iter = 0; col_iter < size.width; col_iter++)
    {
      for (int channel_index = 0; channel_index < channels; channel_index++)
      {
       
		  cvmSet(	  p_mat
					, row_iter
					, col_iter
					//, col_iter*channels
					, static_cast<T>(p_data[(col_iter*size.height)+ row_iter+(channel_index*channel_offset)]));
        //(CV_IMAGE_ELEM( p_iplimage, T, row_iter, (col_iter*channels) ) )= 
        //  static_cast<T>(p_data[(col_iter*size.height)+ row_iter+(channel_index*channel_offset)]);

      }
    }
  }

  //done
  return p_mat;
}
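The copy loop above depends on MATLAB's column-major layout: element (row, col, channel) of an nrows x ncols x nchannels mxArray sits at data[channel*nrows*ncols + col*nrows + row]. A tiny helper making that mapping explicit (matlabIndex is not part of the original source):

/* Linear index of (row, col, channel) in a column-major MATLAB array. */
static size_t matlabIndex(size_t row, size_t col, size_t channel,
                          size_t nrows, size_t ncols)
{
  return channel*nrows*ncols + col*nrows + row;
}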
Example #10
int poisson_solver(IplImage *im_src, IplImage *im_dst, IplImage *im_mask, int channel, int *offset){
    int i, j, loop, neighbor, count_neighbors, flag_edge, ok;
    float error, sum_f, sum_fstar, sum_vpq, fp, fq, gp, gq;
    int naddr[NUM_NEIGHBOR][2]={{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
    CvMat* im_new = cvCreateMat(im_dst->height, im_dst->width, CV_64F);
    for(i=0; i<im_dst->height; i++){
        for(j=0; j<im_dst->width; j++){
            cvmSet(im_new, i, j, (double)((uchar)im_dst->imageData[(i)*im_dst->widthStep + (j)*im_dst->nChannels + channel]));
        }
    }
    
    for(loop=0; loop<LOOP_MAX; loop++){
        ok=1;
        for(i=0; i<im_mask->height; i++){
            for(j=0; j<im_mask->width; j++){
                if(int((uchar)im_mask->imageData[i*im_mask->widthStep + j]) > 0){
                    sum_f=0.0;
                    sum_fstar=0.0;
                    sum_vpq=0.0;
                    count_neighbors=0;
                    flag_edge=0;
                    for(neighbor=0; neighbor<NUM_NEIGHBOR; neighbor++){
                        if(int((uchar)im_mask->imageData[(i+naddr[neighbor][0])*im_mask->widthStep + (j+naddr[neighbor][1])]) == 0){
                            flag_edge = 1;
                            break;
                        }
                    }
                    if(flag_edge == 0){
                        for(neighbor=0; neighbor<NUM_NEIGHBOR; neighbor++){
                            if(i+offset[0]+naddr[neighbor][0] >= 0 && j+offset[1]+naddr[neighbor][1] >= 0 && i+offset[0]+naddr[neighbor][0] < im_dst->height && j+offset[1]+naddr[neighbor][1] < im_dst->width){
                                sum_f += cvmGet(im_new, i+offset[0]+naddr[neighbor][0], j+offset[1]+naddr[neighbor][1]);
                                sum_vpq += float((uchar)im_src->imageData[(i)*im_src->widthStep + (j)*im_src->nChannels + channel]) - float((uchar)im_src->imageData[(i+naddr[neighbor][0])*im_src->widthStep + (j+naddr[neighbor][1])*im_src->nChannels + channel]);
                                count_neighbors++;
                            }
                        }
                    }
                    else{
                        for(neighbor=0; neighbor<NUM_NEIGHBOR; neighbor++){
                            if(i+offset[0]+naddr[neighbor][0] >= 0 && j+offset[1]+naddr[neighbor][1] >= 0 && i+offset[0]+naddr[neighbor][0] < im_dst->height && j+offset[1]+naddr[neighbor][1] < im_dst->width){
                                fp = (double)((uchar)im_dst->imageData[(i+offset[0])*im_dst->widthStep + (j+offset[1])*im_dst->nChannels + channel]);
                                fq = (double)((uchar)im_dst->imageData[(i+offset[0]+naddr[neighbor][0])*im_dst->widthStep + (j+offset[1]+naddr[neighbor][1])*im_dst->nChannels + channel]);
                                gp = (double)((uchar)im_src->imageData[(i)*im_src->widthStep + (j)*im_src->nChannels + channel]);
                                gq = (double)((uchar)im_src->imageData[(i+naddr[neighbor][0])*im_src->widthStep + (j+naddr[neighbor][1])*im_src->nChannels + channel]);
                                sum_fstar += fq;
                                if( fabs(fp-fq) > fabs(gp-gq)){
                                    sum_vpq += fp-fq;
                                }
                                else{
                                    sum_vpq += gp-gq;
                                }
                                count_neighbors++;
                            }
                        }
                    }
                    fp = (sum_f + sum_fstar + sum_vpq)/(float)count_neighbors;
                    error = fabs(fp - cvmGet(im_new, i+offset[0], j+offset[1]));
                    if( ok && error > EPS * (1+fabs(fp))){
                        ok = 0;
                    }
                    cvmSet(im_new, i+offset[0], j+offset[1], fp);
                }
            }
        }
        if(ok){
            break;
        }
    }
    
    for(i=0; i<im_dst->height; i++){
        for(j=0; j<im_dst->width; j++){
            if(cvmGet(im_new, i, j) > 255){
                cvmSet(im_new, i, j, 255.0);
            }
            else if(cvmGet(im_new, i, j) < 0){
                cvmSet(im_new, i, j, 0.0);
            }
            im_dst->imageData[(i)*im_dst->widthStep + (j)*im_dst->nChannels + channel] = (uchar)cvmGet(im_new, i, j);
        }
    }
    return 1;
}
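poisson_solver works on one channel at a time, so a caller has to loop over the channels of the destination image. A plausible driver sketch, assuming a BGR destination and illustrative offsetRow/offsetCol paste coordinates:

/* Sketch: blend im_src into im_dst at (offsetRow, offsetCol), channel by channel. */
int offset[2] = { offsetRow, offsetCol };
for(int channel = 0; channel < im_dst->nChannels; channel++){
    poisson_solver(im_src, im_dst, im_mask, channel, offset);
}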
Example #11
void cvmSet3DPoint(CvMat* matrix, int row, int col, CvPoint3D32f point)
{
	cvmSet(matrix, row, col, point.x);
	cvmSet(matrix, row, col + 1, point.y);
	cvmSet(matrix, row, col + 2, point.z);
}
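cvmSet3DPoint packs a point into three consecutive columns, so an n-point set fits a 1 x 3n row matrix with point i starting at column 3*i. A short usage sketch:

CvMat* points = cvCreateMat(1, 6, CV_32FC1);	// room for two 3D points
cvmSet3DPoint(points, 0, 0, cvPoint3D32f(1.0f, 2.0f, 3.0f));	// point 0 -> cols 0..2
cvmSet3DPoint(points, 0, 3, cvPoint3D32f(4.0f, 5.0f, 6.0f));	// point 1 -> cols 3..5
cvReleaseMat(&points);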
Example #12
//Particle filter
void particleFilter()
{
	int i, c;
	double w = 0.0, h = 0.0;
	cv::VideoCapture capture(0);
	//CvCapture *capture = 0;
	//capture = cvCreateCameraCapture (0);

	  int n_stat = 4;
	  int n_particle = 4000;
	  CvConDensation *cond = 0;
	  CvMat *lowerBound = 0;
	  CvMat *upperBound = 0;
	  int xx, yy;

	  capture >> capframe;

	//Capture one frame and get the capture size.
	//frame = cvQueryFrame (capture);
	//redimage=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
	//greenimage=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
	//blueimage=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
	w = capframe.cols;
	h = capframe.rows;
	//w = frame->width;
    //h = frame->height;
	cv::namedWindow("Condensation", CV_WINDOW_AUTOSIZE);
	cv::setMouseCallback("Condensation", on_mouse, 0);
	//cvNamedWindow ("Condensation", CV_WINDOW_AUTOSIZE);
	//cvSetMouseCallback("Condensation",on_mouse,0);
 
 	//Font settings
	//CvFont dfont;
	//float hscale      = 0.7f;
	//float vscale      = 0.7f;
	//float italicscale = 0.0f;
	//int  thickness    = 1;
	//char text[255] = "";
	//cvInitFont (&dfont, CV_FONT_HERSHEY_SIMPLEX , hscale, vscale, italicscale, thickness, CV_AA); 

	//Condensation構造体を作成する.
	cond = cvCreateConDensation (n_stat, 0, n_particle);

	//Specify the min/max values each dimension of the state vector can take.
	//Here the state is 4-D: position (x, y) and velocity (x pixels/frame, y pixels/frame).
	lowerBound = cvCreateMat (4, 1, CV_32FC1);
	upperBound = cvCreateMat (4, 1, CV_32FC1);
	cvmSet (lowerBound, 0, 0, 0.0);
	cvmSet (lowerBound, 1, 0, 0.0);
	cvmSet (lowerBound, 2, 0, -20.0);
	cvmSet (lowerBound, 3, 0, -20.0);
	cvmSet (upperBound, 0, 0, w);
	cvmSet (upperBound, 1, 0, h);
	cvmSet (upperBound, 2, 0, 20.0);
	cvmSet (upperBound, 3, 0, 20.0);
  
	//Initialize the Condensation structure
	cvConDensInitSampleSet (cond, lowerBound, upperBound);

	//Specify the state-vector dynamics used by the ConDensation algorithm
	cond->DynamMatr[0] = 1.0;
	cond->DynamMatr[1] = 0.0;
	cond->DynamMatr[2] = 1.0;
	cond->DynamMatr[3] = 0.0;
	cond->DynamMatr[4] = 0.0;
	cond->DynamMatr[5] = 1.0;
	cond->DynamMatr[6] = 0.0;
	cond->DynamMatr[7] = 1.0;
	cond->DynamMatr[8] = 0.0;
	cond->DynamMatr[9] = 0.0;
	cond->DynamMatr[10] = 1.0;
	cond->DynamMatr[11] = 0.0;
	cond->DynamMatr[12] = 0.0;
	cond->DynamMatr[13] = 0.0;
	cond->DynamMatr[14] = 0.0;
	cond->DynamMatr[15] = 1.0;
  
	//Reset the noise parameters.
	cvRandInit (&(cond->RandS[0]), -25, 25, (int) cvGetTickCount ());
	cvRandInit (&(cond->RandS[1]), -25, 25, (int) cvGetTickCount ());
	cvRandInit (&(cond->RandS[2]), -5, 5, (int) cvGetTickCount ());
	cvRandInit (&(cond->RandS[3]), -5, 5, (int) cvGetTickCount ());
	
	while (1) 
	{
		capture >> capframe;
		//frame = cvQueryFrame (capture);


		//Compute the likelihood of each particle.
		for (i = 0; i < n_particle; i++)
		{ 
			xx = (int) (cond->flSamples[i][0]);
			yy = (int) (cond->flSamples[i][1]);
			 if (xx < 0 || xx >= w || yy < 0 || yy >= h) 
				{  
					cond->flConfidence[i] = 0.0;
				}
				else
				{  
					cond->flConfidence[i] = calc_likelihood (capframe, xx, yy);
					//cond->flConfidence[i] = calc_likelihood (frame, xx, yy);
				
					cv::circle(capframe, cv::Point(xx, yy), 1, CV_RGB(0, 255, 200));
					//cvCircle (frame, cvPoint (xx, yy), 1, CV_RGB (0, 255, 200), -1);
				}	 
		}

		//Compute the total weight and the weighted centroid
		double wx = 0, wy = 0;
		double sumWeight = 0;
		for (i = 0; i < n_particle; i++)
		{
			sumWeight += cond->flConfidence[i];
		}
		for (i = 0; i < n_particle; i++)
		{
			wx += (int) (cond->flSamples[i][0]) * (cond->flConfidence[i] / sumWeight);
			wy += (int) (cond->flSamples[i][1]) * (cond->flConfidence[i] / sumWeight);
		}

		//Draw the centroid
		cv::circle(capframe, cv::Point((int)wx, (int)wy), 10, cv::Scalar(0, 0, 255));
		cv::circle(capframe, cv::Point(20, 20), 10, CV_RGB(red, green, blue), 6);
		cv::putText(capframe, "target", cv::Point(0, 50), cv::FONT_HERSHEY_SIMPLEX, 0.7, CV_RGB(red, green, blue));
		cv::imshow("Condensation", capframe);

		//cvCircle(frame,cvPoint(20,20),10,CV_RGB(red,green,blue),-1);
		//cvPutText(frame,"target",cvPoint(0,50),&dfont,CV_RGB(red,green,blue));
		//cvShowImage ("Condensation", frame);
		
		c = cv::waitKey(30);
		//c = cvWaitKey (30);
		if (c == 27)      break;
  
		//Estimate the model's next state
		cvConDensUpdateByTime (cond);

	}

	cv::destroyWindow("Condensation");
	//cvDestroyWindow ("Condensation");
	//cvReleaseCapture (&capture);

	//cvReleaseImage(&redimage);
	//cvReleaseImage(&greenimage);
	//cvReleaseImage(&blueimage);
	cvReleaseConDensation (&cond);

	cvReleaseMat (&lowerBound);
	cvReleaseMat (&upperBound);
}
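particleFilter calls calc_likelihood, which is not included in this excerpt. A plausible reimplementation sketch, assuming the globals red, green and blue hold the target colour picked in on_mouse, scoring a pixel by a Gaussian on its BGR distance to that colour; the sigma value is arbitrary.

//Likelihood that pixel (x, y) belongs to the tracked colour (sketch only).
double calc_likelihood(const cv::Mat& img, int x, int y)
{
	const double sigma = 50.0;			// assumed colour tolerance
	cv::Vec3b px = img.at<cv::Vec3b>(y, x);		// OpenCV stores BGR
	double db = px[0] - blue;
	double dg = px[1] - green;
	double dr = px[2] - red;
	double dist2 = db*db + dg*dg + dr*dr;
	return exp(-dist2 / (2.0*sigma*sigma)) + 1e-6;	// keep every weight nonzero
}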
Example #13
CvMat* HomographyCalculationThread::calculateHomographicMatrix(double newPictureApexX[],double newPictureApexY[],double pictureApexXPosition[],double pictureApexYPosition[])
{
	CvMat* mmat = cvCreateMat(3,3,CV_32FC1);
	CvMat* a = cvCreateMat(POINTS*2,9,CV_32FC1);
	for(int count=1;count<POINTS+1;count++) {
		cvmSet(a,2*count-2,0,newPictureApexX[count-1]);
		cvmSet(a,2*count-2,1,newPictureApexY[count-1]);
		cvmSet(a,2*count-2,2,1);
		cvmSet(a,2*count-2,3,0);
		cvmSet(a,2*count-2,4,0);
		cvmSet(a,2*count-2,5,0);
		cvmSet(a,2*count-2,6,(-newPictureApexX[count-1]*pictureApexXPosition[count-1]));
		cvmSet(a,2*count-2,7,(-pictureApexXPosition[count-1]*newPictureApexY[count-1]));
		cvmSet(a,2*count-2,8,-pictureApexXPosition[count-1]);
		cvmSet(a,2*count-1,0,0);
		cvmSet(a,2*count-1,1,0);
		cvmSet(a,2*count-1,2,0);
		cvmSet(a,2*count-1,3,newPictureApexX[count-1]);
		cvmSet(a,2*count-1,4,newPictureApexY[count-1]);
		cvmSet(a,2*count-1,5,1);
		cvmSet(a,2*count-1,6,(-newPictureApexX[count-1]*pictureApexYPosition[count-1]));
		cvmSet(a,2*count-1,7,(-pictureApexYPosition[count-1]*newPictureApexY[count-1]));
		cvmSet(a,2*count-1,8,-pictureApexYPosition[count-1]);
	}
	CvMat* U  = cvCreateMat(8,8,CV_32FC1);
	CvMat* D  = cvCreateMat(8,9,CV_32FC1);
	CvMat* V  = cvCreateMat(9,9,CV_32FC1);
	CvMat* V22 = cvCreateMat(3,3,CV_32FC1);
	mHMatrix = cvCreateMat(3,3,CV_32FC1);
	CvMat* invH = cvCreateMat(3,3,CV_32FC1);
	cvSVD(a, D, U, V, CV_SVD_U_T|CV_SVD_V_T);

	for(int r=0;r<3;r++) {
		for(int c=0;c<3;c++) {
			cvmSet(mmat,r,c,cvmGet(V,8,r*3+c));
			cvmSet(V22,r,c,(1/cvmGet(V,8,4)));
		}
	}

	cvMul(mmat,V22,mHMatrix);
	cvInvert(mHMatrix,invH);
	cvReleaseMat(&U);
	cvReleaseMat(&D);
	cvReleaseMat(&V);
	cvReleaseMat(&V22);
	cvReleaseMat(&a);
	cvReleaseMat(&mmat);
	return invH;
}
Example #14
osg::Geode* myKeyboardEventHandler::Uncertainty()
{
    osg::Geode* geode = new osg::Geode();
    osg::Geometry* pointsGeom = new osg::Geometry();

    osg::Vec3Array* vertices = new osg::Vec3Array;

    CvMat *temp;
    temp=cvCreateMatHeader(6,6,CV_32FC1);
    CvMat *_temp = cvCreateMat (6,6,CV_32FC1);
    CvMat* vect=cvCreateMat (6,1,CV_32FC1);
    CvMat* res6=cvCreateMat (6,1,CV_32FC1);
    //CvMat* vect2=cvCreateMat(1,6,CV_32FC1);
    //CvPoint2D64f proj ;
    //CvMat* m = cvCreateMat(3,1,CV_32FC1);
    float *fstate;
    fstate= pSlam->X->data.fl;
    float xc,yc,zc,theta,phi,rho;

    for (unsigned int i = 0 ; i < pSlam->visible.size(); i++)
    {
        if(pSlam->visible[i]==true)
        {
            xc=fstate[pSlam->modelSD+6*i];
            yc=fstate[pSlam->modelSD+6*i+1];
            zc=fstate[pSlam->modelSD+6*i+2];
            theta=fstate[pSlam->modelSD+6*i+3];
            phi=fstate[pSlam->modelSD+6*i+4];
            rho=fstate[pSlam->modelSD+6*i+5];


            cvGetSubRect( pSlam->P,temp,cvRect(pSlam->modelSD+i*6,pSlam->modelSD+i*6,6,6) );

            for (int part=0; part<40; part++){
//                cvmSet(vect,0,0,pSlam->randomVector(-.005,.005));
//                cvmSet(vect,1,0,pSlam->randomVector(-.005,.005));
//                cvmSet(vect,2,0,pSlam->randomVector(-.005,.005));
//                cvmSet(vect,3,0,pSlam->randomVector(-.05,0.05));
//                cvmSet(vect,4,0,pSlam->randomVector(-.05,0.05));
//                cvmSet(vect,5,0,pSlam->randomVector(-1,1));
                cvmSet(vect,0,0,pSlam->randomVector(-12.6,12.6));
                cvmSet(vect,1,0,pSlam->randomVector(-12.6,12.6));
                cvmSet(vect,2,0,pSlam->randomVector(-12.6,12.6));
                cvmSet(vect,3,0,pSlam->randomVector(-12.6,12.6));
                cvmSet(vect,4,0,pSlam->randomVector(-12.6,12.6));
                cvmSet(vect,5,0,pSlam->randomVector(-12.6,12.6));
                cvZero(_temp);
                pSlam->Cholesky(temp,_temp);
                cvMatMul(_temp,vect,res6);

                double ox, oy, oz;
                pSlam->InverseDepth2Depth(cvmGet(res6,0,0)+xc,
                                          cvmGet(res6,1,0)+yc,
                                          cvmGet(res6,2,0)+zc,
                                          cvmGet(res6,3,0)+theta,
                                          cvmGet(res6,4,0)+phi,
                                          cvmGet(res6,5,0)+rho,
                                          &ox,&oy,&oz );

                //CvMat* nullmat = 0;

                if (ox> -100000 && ox<10000 &&
                    oy> -100000 && oy<10000 &&
                    oz> -100000 && oz<10000)
                {
                    vertices->push_back(osg::Vec3(ox,oy,oz));
                }
            }
        }
    }

    // pass the created vertex array to the points geometry object.
    pointsGeom->setVertexArray(vertices);
    // create the color of the geometry, a single color for the whole geometry.
    // for consistency of design even a single color must be added as an element
    // in a color array.
    osg::Vec4Array* colors = new osg::Vec4Array;
    // add a red color; colors take the form r,g,b,a with 0.0 off, 1.0 full on.
    colors->push_back(osg::Vec4(1.0f,0.0f,0.0f,1.0f));
    // pass the color array to the points geometry; note the binding tells the
    // geometry to use a single color for the whole object.
    pointsGeom->setColorArray(colors);
    pointsGeom->setColorBinding(osg::Geometry::BIND_OVERALL);

    // set the normal in the same way as the color.
    osg::Vec3Array* normals = new osg::Vec3Array;
    normals->push_back(osg::Vec3(0.0f,-1.0f,0.0f));
    pointsGeom->setNormalArray(normals);
    pointsGeom->setNormalBinding(osg::Geometry::BIND_OVERALL);


    // create and add a DrawArray Primitive (see include/osg/Primitive).  The first
    // parameter passed to the DrawArrays constructor is the Primitive::Mode which
    // in this case is POINTS (which has the same value GL_POINTS), the second
    // parameter is the index position into the vertex array of the first point
    // to draw, and the third parameter is the number of points to draw.
    pointsGeom->addPrimitiveSet(new osg::DrawArrays(osg::PrimitiveSet::POINTS,0,vertices->size()));


    // add the points geometry to the geode.
    geode->addDrawable(pointsGeom);
    // Create a new StateSet with default settings:
    osg::StateSet* stateOne = new osg::StateSet();
    osg::Point *  point = new osg::Point();
    point->setSize(1.0);
    stateOne->setAttributeAndModes(point,osg::StateAttribute::ON);
    geode->setStateSet(stateOne);

    // release the temporary OpenCV matrices before returning
    cvReleaseMat(&temp);
    cvReleaseMat(&_temp);
    cvReleaseMat(&vect);
    cvReleaseMat(&res6);
    return geode;
}
Example #15
void initiate_matrices_for_calibration(CvMat * intrinsicMatrix, CvMat * distortionCoeffs,
                                       CvMat * nbTotalCorners, CvMat * objectRealCoordinates,
                                       CvMat * cornersMat, CvPoint2D32f * cornersList)
{
    int i,j,t;

    /*////////////// Initialisation of the intrinsic matrix /////////////////////
    Focal lengths set to 1 => assumes the camera's normal (1:1) aspect ratio.
    Matrix layout:
    [fx 0  Cx]
    [0  fy Cy]
    [0  0   1] */
    cvmSet(intrinsicMatrix,0,0 , 1); //fx
    cvmSet(intrinsicMatrix,1,1 , 1); //fy

    /*/////////////   CvMat * possible accessors  //////////////////////////
    => Float Matrix only:
    void cvmSet( CvMat* mat, int row, int col, double value )
    double cvmGet( const CvMat* mat, int row, int col )
    void cvSetReal2D(CvArr* arr, int idx0, int idx1, double value)
    double cvGetReal2D(CvArr* arr, int idx0, int idx1)

    => All:
    CV_MAT_ELEM( *data, double, x, y) = 1     */

    /*/////////////// Initialisation of distortion coeffs ////////////////////////*/
    cvmSet(distortionCoeffs,0,0,  0); // k1
    cvmSet(distortionCoeffs,1,0,  0); // k2
    cvmSet(distortionCoeffs,2,0,  0); // p1
    cvmSet(distortionCoeffs,3,0,  0); // p2
    // cvmSet(distortionCoeffs,4,0,  0); // k3

	for (t=0;t<nbImages;t++) {
		for (j=0;j<nbLines;j++) {   // 3D-matrives stored in 1D-ones (index: columnN° + offset1*LineN° + offset2*imageN°)
			for (i=0;i<nbColumns;i++) {

                /*/////////////// Description of the real chessboard matrix ////////////////////////
                Here we initialise the pattern object's real 2D-coordinates, specified in the chessboard plan
                Coordinates of a 6x8 chessboard with square of 1[unit] for a single image:
                [(0;0) (0;1) (0;2) ... (0;7)]
                [(1;0) (1;1) (1;2) ... (1;7)]
                [       ...                 ]
                [(4;0) (4;1) (4;2) ... (4;7)]
                That implies the calibration result will be in [unit] too  */
				cvmSet(objectRealCoordinates, t*nbCorners +j*nbColumns +i , 0 , j *ratioPixelOverUnit); // x (squares of 3x3cm)
				cvmSet(objectRealCoordinates, t*nbCorners +j*nbColumns +i , 1 , i *ratioPixelOverUnit); // y
				cvmSet(objectRealCoordinates, t*nbCorners +j*nbColumns +i , 2 , 0); // z=0

                /*///////// Conversion from CvPoint2D32f to CvMat for corners coordinates //////////////*/
                cvmSet(cornersMat, t*nbCorners +j*nbColumns +i , 0 , cornersList[t*nbCorners +j*nbColumns +i].x);
                cvmSet(cornersMat, t*nbCorners +j*nbColumns +i , 1 , cornersList[t*nbCorners +j*nbColumns +i].y);

                //printf("(%.1f;%.1f)\t",cvmGet(cornersMat_Left,t*nbImages +j*nbColumns +i,0),cvmGet(cornersMat_Left,t*nbImages +j*nbColumns +i,1));
			}
		}
		CV_MAT_ELEM( *nbTotalCorners, int, t, 0)= nbCorners;  // save nb of corners for each image
	}
}
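These matrices line up with what cvCalibrateCamera2 expects, and initialising fx = fy = 1 only has an effect when CV_CALIB_FIX_ASPECT_RATIO is passed (the solver then keeps the fx/fy ratio from the initial guess). A plausible follow-on call, assuming imageWidth and imageHeight variables:

cvCalibrateCamera2(objectRealCoordinates,	// Nx3 object points
                   cornersMat,			// Nx2 detected corners
                   nbTotalCorners,		// corners per image
                   cvSize(imageWidth, imageHeight),
                   intrinsicMatrix,
                   distortionCoeffs,
                   NULL, NULL,			// per-view extrinsics not needed here
                   CV_CALIB_FIX_ASPECT_RATIO);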
Example #16
CV_IMPL void
cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
                             CvMat *rotMatr, CvMat *posVect,
                             CvMat *rotMatrX, CvMat *rotMatrY,
                             CvMat *rotMatrZ, CvPoint3D64f *eulerAngles)
{
    CvMat *tmpProjMatr = 0;
    CvMat *tmpMatrixD = 0;
    CvMat *tmpMatrixV = 0;
    CvMat *tmpMatrixM = 0;

    CV_FUNCNAME("cvDecomposeProjectionMatrix");
    __BEGIN__;

    /* Validate parameters. */
    if(projMatr == 0 || calibMatr == 0 || rotMatr == 0 || posVect == 0)
        CV_ERROR(CV_StsNullPtr, "Some of the parameters are NULL pointers!");

    if(!CV_IS_MAT(projMatr) || !CV_IS_MAT(calibMatr) || !CV_IS_MAT(rotMatr) || !CV_IS_MAT(posVect))
        CV_ERROR(CV_StsUnsupportedFormat, "Input parameters must be matrices!");

    if(projMatr->cols != 4 || projMatr->rows != 3)
        CV_ERROR(CV_StsUnmatchedSizes, "Size of projection matrix must be 3x4!");

    if(calibMatr->cols != 3 || calibMatr->rows != 3 || rotMatr->cols != 3 || rotMatr->rows != 3)
        CV_ERROR(CV_StsUnmatchedSizes, "Size of calibration and rotation matrices must be 3x3!");

    if(posVect->cols != 1 || posVect->rows != 4)
        CV_ERROR(CV_StsUnmatchedSizes, "Size of position vector must be 4x1!");

    CV_CALL(tmpProjMatr = cvCreateMat(4, 4, CV_64F));
    CV_CALL(tmpMatrixD = cvCreateMat(4, 4, CV_64F));
    CV_CALL(tmpMatrixV = cvCreateMat(4, 4, CV_64F));
    CV_CALL(tmpMatrixM = cvCreateMat(3, 3, CV_64F));

    /* Compute position vector. */

    cvSetZero(tmpProjMatr); // Add zero row to make matrix square.
    int i, k;
    for(i = 0; i < 3; i++)
        for(k = 0; k < 4; k++)
            cvmSet(tmpProjMatr, i, k, cvmGet(projMatr, i, k));

    CV_CALL(cvSVD(tmpProjMatr, tmpMatrixD, NULL, tmpMatrixV, CV_SVD_MODIFY_A + CV_SVD_V_T));

    /* Save position vector. */

    for(i = 0; i < 4; i++)
        cvmSet(posVect, i, 0, cvmGet(tmpMatrixV, 3, i)); // Solution is last row of V.

    /* Compute calibration and rotation matrices via RQ decomposition. */

    cvGetCols(projMatr, tmpMatrixM, 0, 3); // M is first square matrix of P.

    assert(cvDet(tmpMatrixM) != 0.0); // So far only finite cameras could be decomposed, so M has to be nonsingular [det(M) != 0].

    CV_CALL(cvRQDecomp3x3(tmpMatrixM, calibMatr, rotMatr, rotMatrX, rotMatrY, rotMatrZ, eulerAngles));

    __END__;

    cvReleaseMat(&tmpProjMatr);
    cvReleaseMat(&tmpMatrixD);
    cvReleaseMat(&tmpMatrixV);
    cvReleaseMat(&tmpMatrixM);
}
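A minimal calling sketch for the decomposition above: the homogeneous camera centre comes back in posVect (the right null vector of the projection matrix), and the per-axis rotation outputs and Euler angles may be passed as NULL. P is filled with the trivial [I|0] projection purely so the snippet runs.

CvMat* P = cvCreateMat(3, 4, CV_64F);
cvSetZero(P);
cvmSet(P, 0, 0, 1.0); cvmSet(P, 1, 1, 1.0); cvmSet(P, 2, 2, 1.0);  // P = [I|0]

CvMat* K = cvCreateMat(3, 3, CV_64F);   // calibration matrix
CvMat* R = cvCreateMat(3, 3, CV_64F);   // rotation matrix
CvMat* C = cvCreateMat(4, 1, CV_64F);   // homogeneous camera centre
cvDecomposeProjectionMatrix(P, K, R, C, NULL, NULL, NULL, NULL);

cvReleaseMat(&P); cvReleaseMat(&K);
cvReleaseMat(&R); cvReleaseMat(&C);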
Example #17
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
	//bridge that will transform the message (image) from ROS code back to "image" code
  sensor_msgs::CvBridge bridge;
  fprintf(stderr, "\n callBaack funtion \n");
  //publish data (obstacle waypoints) back to the boat
  //ros::NodeHandle n;
  //std_msgs::Float32 xWaypoint_msg;         // X coordinate obstacle message
  //std_msgs::Float32 yWaypoint_msg;         // Y coordinate obstacle message
  //publish the waypoint data             
  //ros::Publisher waypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //ros::Publisher Ywaypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //std::stringstream ss;
  
  /***********************************************************************/
  //live image coming streamed straight from the boat's camera
  IplImage* boatFront = bridge.imgMsgToCv(msg, "bgr8");
  IplImage* backUpImage = bridge.imgMsgToCv(msg, "bgr8");
  boatFront->origin = IPL_ORIGIN_TL;   //sets image origin to top left corner
  //Crop the image to the ROI
  //cvSetImageROI(boatFront, cvRect(0,0,boatFront->height/0.5,boatFront->width/1.83));
  int X = boatFront->height;
  int Y = boatFront->width;
  /***********************************************************************/
  //boat's edge distance from the camera. This is used for visual calibration
  //to know the distance from the boat to the nearest obstacles.
  //With respect to the mounted camera, distance is 21 inches (0.5334 m) side to side
  //and 15 inches (0.381 m).
  //float boatFrontDistance = 0.381;    //distance in meters
  //float boatSideDistance = 0.5334;    //distance in meters
  
  // These variables tell the distance from the center bottom of the image
  // (the camera) to the square surrounding the obstacle
  float xObstacleDistance = 0.0;
  float yObstacleDistance = 0.0;
  float obstacleDistance = 0.0;
  
  int pixelsNumber = 6;  //number of pixels for an n x n matrix and # of neighbors
  const int arraySize = pixelsNumber;
  const int threeArraySize = pixelsNumber;
  //if n gets changed, then the algorithm might have to be
  //recalibrated. Try to keep it constant
  //these variables are used for the k nearest neighbors
  //int accuracy;
  //reponses for each of the classifications
  float responseWaterH, responseWaterS, responseWaterV; 
  float responseGroundH, responseGroundS, responseGroundV;
  //float responseSkyH, responseSkyS, responseSkyV;
  float averageHue = 0.0;
  float averageSat = 0.0;
  float averageVal = 0.0;
  CvMat* trainClasses = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
  CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
  //for (int i = 0; i < pixelsNumber/2; i++)
  //{
    //  cvmSet(trainClasses, i,0,1);
     // cvmSet(trainClasses2, i,0,1);
  //}
  //for (int i = pixelsNumber/2; i < pixelsNumber; i++)
  //{
    //  cvmSet(trainClasses, i,0,2);
     // cvmSet(trainClasses2, i,0,2);
  //}
  //for (int i =0; i<pixelsNumber;i++)
  //{
    //   cout << cvmGet(trainClasses,i,0);
      // cout << cvmGet(trainClasses2,i,0);   
  //}
  //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
  //used with the classifier 
  CvMat* nearestWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //Distance
  //CvMat* distanceWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //these variables are use to traverse the picture by blocks of n x n pixels at
  //a time. 
  //Index(0,0) does not exist, so make sure kj and ki start from 1 (in the
  //right way, of course)
  //x and y are the dimensions of the local patch of pixels
  int x = (boatFront->height)/4; //(boatFront->height)/2.5 + pixelsNumber + 99; 
  int y = pixelsNumber-1; 
  int ix = 0; 
  int iy = 0; 
  int skyX = 0; 
  int skyY = 0;
  //M controls the x axis (up and down); N controls the y axis (left and
  //right)
  int Mw = -550; 
  int Nw = 1300; 
  int Mg = -350; 
  int Ng = 700;
  int row1 = 0;
  int column1 = 0;
  int row2 = 0;
  int column2 = 0;
  //ground sample
  //CvMat* groundTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //CvMat* groundTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //CvMat* groundTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //water sample
  CvMat* waterTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  CvMat* waterTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  CvMat* waterTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //n x n sample patch taken from the picture
  CvMat* sampleHue = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* sampleSat = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* sampleVal = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* resampleHue = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* resampleSat = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* resampleVal = cvCreateMat(arraySize,arraySize,CV_32FC1);
  //sky training sample
  CvMat* skyTrainingHue = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* skyTrainingSat = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* skyTrainingVal = cvCreateMat(arraySize,arraySize,CV_32FC1);
  //initialize each matrix element to zero for ease of use
  //cvZero(groundTrainingHue);
  //cvZero(groundTrainingSat);
  //cvZero(groundTrainingVal);
  cvZero(waterTrainingHue);
  cvZero(waterTrainingSat);
  cvZero(waterTrainingVal);
  cvZero(sampleHue);
  cvZero(sampleSat);
  cvZero(sampleVal);
  cvZero(resampleHue);
  cvZero(resampleSat);
  cvZero(resampleVal);
  cvZero(skyTrainingHue);
  cvZero(skyTrainingSat);
  cvZero(skyTrainingVal);    
  //Stores the votes for each channel (whether it belongs to water or not
  //1 is part of water, 0 not part of water
  //if sum of votes is bigger than 1/2 the number of elements, then it belongs to water
  int votesSum = 0;
  int comparator[3];        //used when only three votes are needed
  //int comparatorTwo [3][3];    //used when six votes are needed
  //initial sum of votes is zero
  //Error if both matrices are initialized inside a single for loop. Don't know why
 // for(int i = 0; i < 3; i++)
  //{   
      //comparator[i] = 0;
    //  for(int j = 0; j < 3; j++)
     // {
       //   comparatorTwo[i][j] = 0;
     // }
  //}
  for(int i = 0; i < 3; i++)
  {   
      comparator[i] = 0;
  }
  
  /***********************************************************************/
  //Convert from RGB to HSV to control the brightness of the objects.
  //work with reflection
  /*Sky recognition. Might be useful for detecting reflection on the water. If
    the sky is detected, and the reflection has the same characteristics as
    something below the horizon, that "something" might be water. Assume the sky
    won't go below the horizon*/
  
  //convert from RGB to HSV
  cvCvtColor(boatFront, boatFront, CV_BGR2HSV);
  cvCvtColor(backUpImage, backUpImage, CV_BGR2HSV);
  HsvImage I(boatFront);
  HsvImage IBackUp(backUpImage);
  fprintf(stderr,"\n About to do Sky detection\n");
  //Sky detection
  /*for (int i=0; i<boatFront->height/3;i++)
  {
      for (int j=0; j<boatFront->width;j++)
      {
      //if something is bright enough, consider it sky and store the
      //value. HSV values go from 0 to 180 ... RGB goes from 0 to 255
          if (((I[i][j].v >= 180) && (I[i][j].s <= 16)))
              // && ((I[i][j].h >=10)))) //&& (I[i][j].h <= 144))))
          {
              //The HSV values vary between 0 and 1
              cvmSet(skyTrainingHue,skyX,skyY,I[i][j].h);
              cvmSet(skyTrainingSat,skyX,skyY,I[i][j].s);
              cvmSet(skyTrainingVal,skyX,skyY,I[i][j].v);
              I[i][j].h = 0.3*180;       //H (color)
              I[i][j].s = 0.3*180;          //S (color intensity)
              I[i][j].v = 0.6*180;          //V (brightness)
              if (skyY == pixelsNumber-1)
              {
                 if (skyX == pixelsNumber-1)
                   skyX = 0;
                 else
                   skyX = skyX + 1;
                 skyY = 0;
              }
              else
                skyY = skyY + 1;
         }   
      }
  }*/
  
  /***********************************************************************/
  //offline input pictures. Samples of water properties are taken from these 
  //pictures to get a range of values for H, S, V that will be stored into a 
  //pre-defined classifier
  IplImage* imageSample1 = cvLoadImage("bigObstacle.jpg");
  cvSetImageROI(imageSample1, cvRect(0,0,imageSample1->height/0.5,imageSample1->width/1.83));
  cvCvtColor(imageSample1, imageSample1, CV_BGR2HSV);
  HsvImage I1(imageSample1);
  IplImage* imageSample2 = cvLoadImage("bigObstacle2.jpg");
  cvSetImageROI(imageSample2, cvRect(0,0,imageSample2->height/0.5,imageSample2->width/1.83));
  cvCvtColor(imageSample2, imageSample2, CV_BGR2HSV);
  HsvImage I2(imageSample2);
  IplImage* imageSample3 = cvLoadImage("bigObstacle3.jpg");
  cvSetImageROI(imageSample3, cvRect(0,0,imageSample3->height/0.5,imageSample3->width/1.83));
  cvCvtColor(imageSample3, imageSample3, CV_BGR2HSV);
  HsvImage I3(imageSample3);
  IplImage* imageSample4 = cvLoadImage("river.jpg");
  cvSetImageROI(imageSample4, cvRect(0,0,imageSample4->height/0.5,imageSample4->width/1.83));
  cvCvtColor(imageSample4, imageSample4, CV_BGR2HSV);
  HsvImage I4(imageSample4);
  IplImage* imageSample5 = cvLoadImage("river2.jpg");
  cvSetImageROI(imageSample5, cvRect(0,0,imageSample5->height/0.5,imageSample5->width/1.83));
  cvCvtColor(imageSample5, imageSample5, CV_BGR2HSV);
  HsvImage I5(imageSample5);
  IplImage* imageSample6 = cvLoadImage("roundObstacle4.jpg");
  cvSetImageROI(imageSample6, cvRect(0,0,imageSample6->height/0.5,imageSample6->width/1.83));
  cvCvtColor(imageSample6, imageSample6, CV_BGR2HSV);
  HsvImage I6(imageSample6);
  IplImage* imageSample7 = cvLoadImage("farm.jpg");
  cvSetImageROI(imageSample7, cvRect(0,0,imageSample7->height/0.5,imageSample7->width/1.83));
  cvCvtColor(imageSample7, imageSample7, CV_BGR2HSV);
  HsvImage I7(imageSample7);
  IplImage* imageSample8 = cvLoadImage("bigObstacle4.jpg");
  cvSetImageROI(imageSample8, cvRect(0,0,imageSample8->height/0.5,imageSample8->width/1.83));
  cvCvtColor(imageSample8, imageSample8, CV_BGR2HSV);
  HsvImage I8(imageSample8);
  IplImage* imageSample9 = cvLoadImage("roundObstacle6.jpg");
  cvSetImageROI(imageSample9, cvRect(0,0,imageSample9->height/0.5,imageSample9->width/1.83));
  cvCvtColor(imageSample9, imageSample9, CV_BGR2HSV);
  HsvImage I9(imageSample9);
  IplImage* imageSample10 = cvLoadImage("roundObstacle.jpg");
  cvSetImageROI(imageSample10, cvRect(0,0,imageSample10->height/0.5,imageSample10->width/1.83));
  cvCvtColor(imageSample10, imageSample10, CV_BGR2HSV);
  HsvImage I10(imageSample10);
  fprintf(stderr,"\n Grab water samples\n");
  //grab water samples from each picture
  for (int i=0; i < threeArraySize; i++)
  {
  	fprintf(stderr,"\n patch is pink (this is for me to know where the ground patch sample is\n");
      for (int j=0; j < arraySize; j++)
      {
          //row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455);
          row1 = x + i;
        	//if (row1 > X-1)
	          //  row1 = X-1;
          column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615);
         // if (column1 > Y-1)
          	//	column1 = Y-1;
          averageHue = (I1[row1][column1].h + I2[row1][column1].h + I3[row1][column1].h + I4[row1][column1].h + I5[row1][column1].h + 	
          I6[row1][column1].h + I7[row1][column1].h + I8[row1][column1].h + I9[row1][column1].h + I10[row1][column1].h) / 10;
          averageSat = (I1[row1][column1].s + I2[row1][column1].s + I3[row1][column1].s + I4[row1][column1].s + I5[row1][column1].s + 
          I6[row1][column1].s + I7[row1][column1].s + I8[row1][column1].s + I9[row1][column1].s + I10[row1][column1].s) / 10;
          averageVal = (I1[row1][column1].v + I2[row1][column1].v + I3[row1][column1].v + I4[row1][column1].v + I5[row1][column1].v + 
          I6[row1][column1].v + I7[row1][column1].v + I8[row1][column1].v + I9[row1][column1].v + I10[row1][column1].v) / 10;   
          fprintf(stderr,"\n water patch sample (n X n matrix)\n");
          cvmSet(waterTrainingHue,i,j,averageHue);
          cvmSet(waterTrainingSat,i,j,averageSat);
          cvmSet(waterTrainingVal,i,j,averageVal);  
          fprintf(stderr,"\n patch is red (this is for me to know where the ground patch sample is\n");
          //I[row1][column1].h = 0;
          //I[row1][column1].s = 255;
          //I[row1][column1].v = 255;
      }
  }
  
  fprintf(stderr,"\n Order water samples in ascending\n");
  //order the water samples in ascending order on order to know a range
  cvSort(waterTrainingHue, waterTrainingHue, CV_SORT_ASCENDING);
  cvSort(waterTrainingSat, waterTrainingSat, CV_SORT_ASCENDING);
  cvSort(waterTrainingVal, waterTrainingVal, CV_SORT_ASCENDING);
  // find the maximum and minimum values in the array to create a range
  int maxH = cvmGet(waterTrainingHue,0,0);
  int maxS = cvmGet(waterTrainingSat,0,0);
  int maxV = cvmGet(waterTrainingVal,0,0);
  int minH = cvmGet(waterTrainingHue,0,0);
  int minS = cvmGet(waterTrainingSat,0,0);
  int minV = cvmGet(waterTrainingVal,0,0);
  for (int i=0; i < threeArraySize; i++)
  {
      for (int j=0; j < arraySize; j++)
      {
          if (cvmGet(waterTrainingHue,i,j) > maxH)
              maxH = cvmGet(waterTrainingHue,i,j);
          if (cvmGet(waterTrainingSat,i,j) > maxS)
              maxS = cvmGet(waterTrainingSat,i,j);
          if (cvmGet(waterTrainingVal,i,j) > maxV)
              maxV = cvmGet(waterTrainingVal,i,j);
          if (cvmGet(waterTrainingHue,i,j) < minH)
              minH = cvmGet(waterTrainingHue,i,j);
          if (cvmGet(waterTrainingSat,i,j) < minS)
              minS = cvmGet(waterTrainingSat,i,j);
          if (cvmGet(waterTrainingVal,i,j) < minV)
              minV = cvmGet(waterTrainingVal,i,j);
      }
  }
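
  //The double scan above can also be done with cvMinMaxLoc, which returns
  //the extrema of a single-channel array in one call; a sketch (assuming
  //the training matrices are single-channel, as created above):
  //double lo, hi;
  //cvMinMaxLoc(waterTrainingHue, &lo, &hi, NULL, NULL, NULL);  // lo==minH, hi==maxH
  //cvMinMaxLoc(waterTrainingSat, &lo, &hi, NULL, NULL, NULL);  // lo==minS, hi==maxS
  //cvMinMaxLoc(waterTrainingVal, &lo, &hi, NULL, NULL, NULL);  // lo==minV, hi==maxV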
	
	/***********************************************************************/
  //Grab a random patch of water below the horizon and compare every other
  //pixel against it
  //The results of the water detection depend on where in the picture the
  //training samples are located. Maybe adding more training samples will
  //help improve this?
  fprintf(stderr,"\n Random patch\n");
  /*for (int i=0; i < threeArraySize; i++)
  {
      for (int j=0; j < arraySize; j++)
      {
          row2 = ceil(X/4.7291)+ceil(X/8.3176)+i+Mg;
          column2 = ceil(Y/7.78378)+ceil(Y/16.54468)+j+Ng;
      //ground patch sample (n X n matrix)
      //Detecting the horizon in the picture might be an excellent visual aid to
      //choose where (above the horizon) you can take a ground training(1:3*n,1:n)g sample
      //from. The ground pixel sample can be at a constant distance from the
      //horizon
          cvmSet(groundTrainingHue,i,j,I[row2][column2].h);
          cvmSet(groundTrainingSat,i,j,I[row2][column2].s);
          cvmSet(groundTrainingVal,i,j,I[row2][column2].v);   
      //patch is red (this is for me to know where the ground patch sample is)
          I[row2][column2].h = 60; 
          I[row2][column2].s = 180;
          I[row2][column2].v = 90;
      }
  }
  //order the ground samples in ascending order in order to establish a range
  cvSort(groundTrainingHue, groundTrainingHue, CV_SORT_ASCENDING);
  cvSort(groundTrainingSat, groundTrainingSat, CV_SORT_ASCENDING);
  cvSort(groundTrainingVal, groundTrainingVal, CV_SORT_ASCENDING);
  */ 
  // Main loop. It traverses through the picture
  skyX = 0; 
  skyY = 0;
  
  //The distance formula calculated by plotting points is given by:
  /*********** distance = 0.0006994144*(1.011716711^x)     *****************/
  //cout << "Distance: " << distancePixels << endl;
  fprintf(stderr,"\n Painting water red!!!!!\n");
  while (x < boatFront->height/1.2)
  {
      //get a random sample taken from the picture. Must be determined whether
      //it is water or ground
      for (int i = 0; i<pixelsNumber;i++)
      {
          cvmSet(sampleHue,0,i,I[x][y].h);
          cvmSet(sampleSat,0,i,I[x][y].s);
          cvmSet(sampleVal,0,i,I[x][y].v);
      }
      //Find the shortest distance between a pixel and the neighbors from each of
      //the training samples (sort of inefficient, but might do the job...sometimes)
      //if (ix == pixelsNumber-1)
      //{
          //HSV for water sample
          // learn classifier
          //CvKNearest knn(trainData, trainClasses, 0, false, itemsNumber);
          //CvKNearest knnWaterHue(waterTrainingHue, trainClasses, 0, false, pixelsNumber);
          //CvKNearest knnWaterSat(waterTrainingSat, trainClasses, 0, false, pixelsNumber);
          //CvKNearest knnWaterVal(waterTrainingVal, trainClasses, 0, false, pixelsNumber);
          //HSV for ground sample
          //CvKNearest knnGroundHue(groundTrainingHue, trainClasses2, 0, false, pixelsNumber);
          //CvKNearest knnGroundSat(groundTrainingSat, trainClasses2, 0, false, pixelsNumber);
          //CvKNearest knnGroundVal(groundTrainingVal, trainClasses2, 0, false, pixelsNumber);
          //HSV for sky sample
          //if (cvmGet(skyTrainingHue,0,0)!=0.0 && cvmGet(skyTrainingSat,0,0)!=0.0 && cvmGet(skyTrainingVal,0,0)!=0.0)
          //{
            //  CvKNearest knnSkyHue(skyTrainingHue, trainClasses, 0, false, pixelsNumber);
              //CvKNearest knnSkySat(skyTrainingSat, trainClasses, 0, false, pixelsNumber);
              //CvKNearest knnSkyVal(skyTrainingVal, trainClasses, 0, false, pixelsNumber);
          //}
          
          //scan nearest neighbors to each pixel
          //responseWaterH = knnWaterHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestWaterH,0);
          //responseWaterS = knnWaterSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestWaterS,0);
          //responseWaterV = knnWaterVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestWaterV,0);
          //responseGroundH = knnGroundHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestGroundH,0);
          //responseGroundS = knnGroundSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestGroundS,0);
          //responseGroundV = knnGroundVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestGroundV,0);
              for (int i=0;i<pixelsNumber;i++)
              {
                  for (int j=0;j<pixelsNumber;j++)
                  {
                      if ((minH <= cvmGet(sampleHue,0,j)) || (maxH >= cvmGet(sampleHue,0,j)))
                          //mark water samples as green
                          comparator[0] = 1;
                      else
                          comparator[0] = 0;
                    if (((minS <= cvmGet(sampleSat,0,j)) || (maxS <= cvmGet(sampleSat,0,j))))
                      //mark water samples as green
                          comparator[1] = 1;
                      else
                          comparator[1] = 0;
                      if ((minV <= cvmGet(sampleVal,0,j)) || (maxV <= cvmGet(sampleVal,0,j)))
                      //mark water samples as green
                          comparator[2] = 1;
                      else
                          comparator[2] = 0;
                      //count votes
                      for (int i3=0; i3 < 3; i3++)
                          votesSum = votesSum + comparator[i3];
                      //sky detection 
                      if (votesSum > 1) //&& ((sampleSat[i][j] - sampleVal[i][j]) <= 0.1*180)
                      {
                      // classify pixel as water 
                          I[x-pixelsNumber+i][y-pixelsNumber+j].h = 0;
                          I[x-pixelsNumber+i][y-pixelsNumber+j].s = 255;
                          I[x-pixelsNumber+i][y-pixelsNumber+j].v = 255;
                      }
                      votesSum = 0;
                  }
              }
          if (y < Y-1)
              y = y + pixelsNumber-1;
          if (y > Y-1)
              y = Y-1;
          else if (y == Y-1)
          {
              x = x + pixelsNumber-1;
              y = pixelsNumber-1;
          }
          //ix = 0;
  }
  //traverse through the image one more time, divide the image in grids of
    // 500x500 pixels, and see how many pixels of water are in each grid. If
    // most of the pixels are labeled water, then mark all the other pixels
    // as water as well    
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    int xDivisor = 100;
    int yDivisor = 100;
    votesSum = 0;
    column1 = 0;
    row1 = 0;
    x = ceil(boatFront->height/2.5);
    obstacleDistance = x;
    y = 0;
    int counter = 0;
    
    //The problem lies somewhere below this line!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    while (x < boatFront->height/1.2)
    {
        //get a random sample taken from the picture. Must be determined whether
        //it is water or ground
        for (int i = 0; i < ceil(boatFront->height/xDivisor); i++)
        {
            for(int j = 0; j < ceil(boatFront->width/yDivisor); j++)
            {
                cvmSet(resampleHue,i,j,I[x+i][y+j].h);
                cvmSet(resampleSat,i,j,I[x+i][y+j].s);
                cvmSet(resampleVal,i,j,I[x+i][y+j].v);
                if(cvmGet(resampleHue,i,j)==0 && cvmGet(resampleSat,i,j)==255 && cvmGet(resampleVal,i,j)==255)
                {
                    votesSum++;
                }
            }
        }
        if (votesSum > ((boatFront->height/xDivisor)*(boatFront->width/yDivisor)*(8.9/9)))
        {   
        // if more than 8.9/9 of the pixels in the block are already marked as
        // water, consider the entire block as water
        // We might need to use smaller fractions (like 5/6 maybe?)
            for (int i = 0; i < ceil(boatFront->height/xDivisor);i++)
            {
                for (int j = 0; j < ceil(boatFront->width/yDivisor); j++)
                {
                    row1 = x + i;
                    if (row1 > X-1)
                        row1 = X-1;
                    column1 = y+j;
                    I[row1][column1].h = 0;
                    I[row1][column1].s = 0;
                    I[row1][column1].v = 0;
                }
            }
        }
        else
        {   
        // If not water, eliminate all red pixels and turn those pixels
        // back to the original color. These pixels shall, then, be marked
        // as obstacles
            for (int i = 0; i < ceil(boatFront->height/xDivisor);i++)
            {
                for (int j = 0; j < ceil(boatFront->width/yDivisor); j++)
                {
                    row1 = x + i;
                    if (row1 > X-1)
                        row1 = X-1;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    //the darker the color, the closer the object to the boat
                    //I[row1][column1].h = 128;    
                    //I[row1][column1].s = 255;   
                    //I[row1][column1].v = 255 - counter;
                    I[row1][column1].h = IBackUp[row1][column1].h;
                    I[row1][column1].s = IBackUp[row1][column1].s;
                    I[row1][column1].v = IBackUp[row1][column1].v;
                    //counter = counter + 20;
                }
            }
            //The distance formula calculated by plotting points is given by:
    /***********  distance = (1.76e-11)*pow(pixels,3.99)  *****************/
    /***********  pixel = (513.9332077469)*pow(distance,0.240675506)  *****************/
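    /* Those fitted conversions as helper functions (a sketch; names are
       hypothetical, constants copied from the comments above):
       static double pixelsToMeters(double pixels) { return 1.76e-11 * pow(pixels, 3.99); }
       static double metersToPixels(double d) { return 513.9332077469 * pow(d, 0.240675506); }
    */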
    
            // Convert from pixel distance to normal distance in meters
            if(obstacleDistance > sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2)))
            {
                // x,y coordinates of the obstacle
                xObstacleDistance = (1.76e-11)*pow(((boatFront->height/xDivisor)+x)/2, 3.99) ;
                yObstacleDistance = (1.76e-11)*pow(((boatFront->width/yDivisor)+y)/2, 3.99);
                //xWaypoint_msg = xObstacleDistance;
                //yWaypoint_msg = yObstacleDistance;
                //publish position data
                //waypoint_info_pub.publish(xWaypoint_msg);
                //waypoint_info_pub.publish(yWaypoint_msg);
                //ROS_INFO("Obstacle coordinates: X = %f meters, Y = %f meters", xObstacleDistance, yObstacleDistance);  
                obstacleDistance = sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2));
                //ROS_INFO("Obstacle distance from: %f", obstacleDistance);
            }
            //cout << "Distance to Obstacle is: " << obstacleDistance << endl << endl;
            
        }
        y = y + boatFront->width/yDivisor;
        if (y > Y-1)
        {
            x = x + boatFront->height/xDivisor;
            y = 0;
            counter = counter + 30;
        }
        votesSum = 0;
    }
    
  fprintf(stderr,"\n About to color\n");
  cvCvtColor(boatFront, boatFront, CV_HSV2BGR);
  //cvCvtColor(backUpImage, backUpImage, CV_HSV2BGR);
  /**************************************************************************/
	try
  {
  	fprintf(stderr,"\n boatFront\n");
    cvShowImage("Boat Front", boatFront);
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
  }
}
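
// The per-pixel vote in the classifier above reduces to this predicate; a
// minimal standalone sketch (function name hypothetical):
static int isWaterPixel(double h, double s, double v,
                        int minH, int maxH, int minS, int maxS,
                        int minV, int maxV)
{
    int votes = (h >= minH && h <= maxH)
              + (s >= minS && s <= maxS)
              + (v >= minV && v <= maxV);
    return votes > 1;  // water when at least two of the three channels are in range
}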
Example #18
0
void CDataOut::Disp_out(IplImage *framecopy)
{
  char ndisp[100];
  sprintf(ndisp,(resdir+"disp%d.txt").c_str(),iter++);
  DispFile.open(ndisp);
  int ii=0;
  CvMat *temp,*temp2,*temp3;
  temp=cvCreateMatHeader(6,6,CV_32FC1);
  temp2=cvCreateMat(3,6,CV_32FC1);
  temp3=cvCreateMat(3,3,CV_32FC1);
  CvMat* vect=cvCreateMat (6,1,CV_32FC1);
  CvMat* res6=cvCreateMat (6,1,CV_32FC1);
  CvMat* vect2=cvCreateMat(1,6,CV_32FC1);
  CvMat* proj = cvCreateMat(4,2,CV_32FC1);    ///NOT RELEASED
  CvMat* m = cvCreateMat(3,1,CV_32FC1);

  for ( list<CElempunto*>::iterator It=pEstimator->pMap->bbdd.begin();
	    It != pEstimator->pMap->bbdd.end(); It++ )
    {
      if((*It)->state>2)
	  {

	  cvmSet(m,0,0,cos((*It)->theta)*sin((*It)->phi));
	  cvmSet(m,1,0,-sin((*It)->theta));
	  cvmSet(m,2,0,cos((*It)->theta)*cos((*It)->phi));
	  cvNormalize( m, m);

	/*  float xreal=(*It)->wx +cvmGet(m,0,0)/(*It)->rho;
	  float yreal=(*It)->wy +cvmGet(m,1,0)/(*It)->rho;
	  float zreal=(*It)->wz +cvmGet(m,2,0)/(*It)->rho;
*/
    CvMat *pCovMat = pEstimator->getCovMat();

	  if (12+ii*6< pCovMat->width && 12+ii*6< pCovMat->height)
	    {

	      cvGetSubRect( pCovMat,temp,cvRect(12+ii*6,12+ii*6,6,6) );

        for (int part=0; part<40; part++){
		  cvmSet(vect,0,0,randomVector(-.005,.005));
  		  cvmSet(vect,1,0,randomVector(-.005,.005));
  		  cvmSet(vect,2,0,randomVector(-.005,.005));
		  cvmSet(vect,3,0,randomVector(-.005,0.005));
		  cvmSet(vect,4,0,randomVector(-.005,0.005));
		  cvmSet(vect,5,0,randomVector(-1,1));

          cvMatMul(temp,vect,res6);

          cvmSet(m,0,0,cos(cvmGet(res6,3,0)+(*It)->theta)*sin(cvmGet(res6,4,0)+(*It)->phi));
  		  cvmSet(m,1,0,-sin(cvmGet(res6,3,0)+(*It)->theta));
  		  cvmSet(m,2,0,cos(cvmGet(res6,3,0)+(*It)->theta)*cos(cvmGet(res6,4,0)+(*It)->phi));
    	  cvNormalize( m, m);
    	  cvmSet (vect2,0,0,((cvmGet(res6,0,0)+(*It)->wx)+cvmGet(m,0,0)/(cvmGet(res6,5,0)+(*It)->rho)));
    	  cvmSet(vect2,0,1,((cvmGet(res6,1,0)+(*It)->wy)+cvmGet(m,1,0)/(cvmGet(res6,5,0)+(*It)->rho)));
   	  cvmSet(vect2,0,2,((cvmGet(res6,2,0)+(*It)->wz)+cvmGet(m,2,0)/(cvmGet(res6,5,0)+(*It)->rho)));

        DispFile<<cvmGet(vect2,0,0)<<" ";
        DispFile<<cvmGet(vect2,0,1)<<" ";
        DispFile<<cvmGet(vect2,0,2)<<" ";
        DispFile<<ii<<endl;
          cvmSet(vect2,0,0,cvmGet(res6,0,0)+(*It)->wx);
          cvmSet(vect2,0,1,cvmGet(res6,1,0)+(*It)->wy);
          cvmSet(vect2,0,2,cvmGet(res6,2,0)+(*It)->wz);
          cvmSet(vect2,0,3,cvmGet(res6,3,0)+(*It)->theta);
          cvmSet(vect2,0,4,cvmGet(res6,4,0)+(*It)->phi);
          cvmSet(vect2,0,5,cvmGet(res6,5,0)+(*It)->rho);

             pModelCam->cvProject_1_pto(vect2,proj,NULL,NULL,NULL);
             if (framecopy != NULL){
    		   cvCircle (framecopy,cvPoint((int)cvmGet(proj,0,0),(int)cvmGet(proj,0,1)),1,cvScalar(0,0,255),1 );
             }
   	      }

	    }
	  }
      ii++;
    }
    cvReleaseMat(&temp);
    cvReleaseMat(&temp2);
    cvReleaseMat(&temp3);
    cvReleaseMat(&vect);
    cvReleaseMat(&vect2);
    cvReleaseMat(&res6);
    cvReleaseMat(&proj);
    DispFile.close();
}
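
// Disp_out samples its perturbations with randomVector(lo, hi), whose
// implementation is not shown here; a plausible uniform sampler (a sketch,
// requires <stdlib.h>):
static float randomVector(float lo, float hi)
{
    return lo + (hi - lo) * ((float)rand() / (float)RAND_MAX);
}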
Example #19
0
IplImage* FacePredict::predict(const AAM_Shape& shape, const IplImage& curImage,
					int curAgeG, int newAgeG, bool save)
					
{
	if (newAgeG > NGROUPS || curAgeG > NGROUPS)
	{
		fprintf(stderr, "ERROE(%s, %d): Age group larger than %d\n",
			__FILE__, __LINE__, NGROUPS);
		exit(0);
	}

	if(curImage.nChannels != 3 || curImage.depth != 8)
	{
		fprintf(stderr, "ERROR(%s: %d): The image channels must be 3, "
			"and the depth must be 8!\n", __FILE__, __LINE__);
		exit(0);
	}

	//get the current shape parameters
	AAM_Shape curShape = shape;
	curShape.Centralize();
	double thisfacewidth = curShape.GetWidth();
	if(stdwidth < thisfacewidth) 
		curShape.Scale(stdwidth / thisfacewidth);
	curShape.AlignTo(__AAMRefShape);
	
	CvMat* p = cvCreateMat(1, __nShapeModes, CV_64FC1);
	CvMat* pq = cvCreateMat(1, 4+__nShapeModes, CV_64FC1);
	__shape.CalcParams(curShape, pq);
	cvGetCols(pq, p, 4, 4+__nShapeModes);
	
	//get the current texture parameters
	CvMat* curTexture = cvCreateMat(1, __paw.nPix() * 3, CV_64FC1);
	__paw.FasterGetWarpTextureFromShape(shape, &curImage, curTexture, false);
	/*IplImage *meanImg = cvCreateImage(cvGetSize(&curImage), curImage.depth, curImage.nChannels);
	__paw.GetWarpImageFromVector(meanImg,curTexture);
	cvShowImage("org Texture", meanImg);*/
	__texture.AlignTextureToRef(__MeanT, curTexture);
	CvMat* lamda = cvCreateMat(1, __nTextureModes, CV_64FC1);
	__texture.CalcParams(curTexture, lamda);

	//calculate new shape and texture parameters
	CvMat newShapeParams;
	CvMat* newpq = cvCreateMat(1, 4+__nShapeModes, CV_64FC1);
	cvmSet(newpq, 0, 0, cvmGet(pq, 0, 0));	cvmSet(newpq, 0, 1, cvmGet(pq, 0, 1));
	cvmSet(newpq, 0, 2, cvmGet(pq, 0, 2));	cvmSet(newpq, 0, 3, cvmGet(pq, 0, 3));
	cvGetCols(newpq, &newShapeParams, 4, 4+__nShapeModes);
	FacePredict::CalcNewShapeParams(p, &newShapeParams, curAgeG, newAgeG);

	CvMat* newTextureParams = cvCreateMat(1, __nTextureModes, CV_64FC1);
	FacePredict::CalcNewTextureParams(lamda, newTextureParams, curAgeG, newAgeG) ;

	//calculate the new shape and texture
	AAM_Shape newShape;
	__shape.CalcShape(newpq, newShape);
	/*CvSize newsize;
	AAM_Shape temNS = newShape;
	temNS.Translate(-temNS.MinX(), -temNS.MinY());
	newsize.width = 128;
	newsize.height = 128;
	IplImage *shapeNImg = cvCreateImage(newsize, curImage.depth, curImage.nChannels);
	cvSet(shapeNImg, CV_RGB(0,0,0));
	temNS.Sketch(shapeNImg);
	cvShowImage("new shape", shapeNImg);
	cvReleaseImage(&shapeNImg);*/

	CvMat* newTexture = cvCreateMat(1, __paw.nPix() * 3, CV_64FC1);
	__texture.CalcTexture(newTextureParams, newTexture);
	/*IplImage *meanNImg = cvCreateImage(cvGetSize(&curImage), curImage.depth, curImage.nChannels);
	__paw.GetWarpImageFromVector(meanNImg,newTexture);
	cvShowImage("Texture", meanNImg);*/

	//synthesize the shape and texture
	
	IplImage* newImage = cvCreateImage(cvSize(stdwidth, stdwidth / newShape.GetWidth() * newShape.GetHeight()), IPL_DEPTH_8U, 3);
	FacePredict::FaceSynthesis(newShape, newTexture, newImage);

	if(save)
		cvSaveImage("facial prediction.jpg", newImage);

	cvReleaseMat(&p);
	cvReleaseMat(&pq);
	cvReleaseMat(&curTexture);
	cvReleaseMat(&lamda);
	cvReleaseMat(&newTextureParams);
	cvReleaseMat(&newpq);
	cvReleaseMat(&newTexture);

	return newImage;
}
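
// Pipeline recap for predict(): the aligned shape is projected to pose q plus
// shape parameters p, and the warped texture to parameters lambda;
// CalcNewShapeParams/CalcNewTextureParams move both parameter sets from age
// group curAgeG to newAgeG, and CalcShape/CalcTexture plus FaceSynthesis
// rebuild the aged face image.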
Example #20
0
void CDataOut::R_Out()
{
  int ii=0;
  RFile<<"library(rgl) "<<endl;
  RFile<<"rgl.clear(\"all\")"<<endl;
  RFile<<"rgl.bg(sphere = TRUE, color = c(\"black\", \"green\"), lit = FALSE, size=2, alpha=0.2, back = \"lines\")"<<endl;
  RFile<<"rgl.light()"<<endl;
  RFile<<"rgl.bbox()"<<endl;
  CvMat *temp2,*temp3;
  temp2=cvCreateMat(3,6,CV_32FC1);
  temp3=cvCreateMat(3,3,CV_32FC1);
  	    CvMat* m = cvCreateMat(3,1,CV_32FC1);
  CvMat *temp,*jtemp;
  temp=cvCreateMatHeader(6,6,CV_32FC1);
  jtemp=cvCreateMat(3,6,CV_32FC1);
  for ( list<CElempunto*>::iterator It=pEstimator->pMap->bbdd.begin();
	It != pEstimator->pMap->bbdd.end(); It++ )
    {
      if((*It)->state>2)
	{
	  float s_th=sin((*It)->theta);
	  float c_th=cos((*It)->theta);
	  float s_ph=sin((*It)->phi);
	  float c_ph=cos((*It)->phi);

	  // directional vector m(theta, phi); matches the Jacobian jtemp below
	  cvmSet(m,0,0,c_th*s_ph);
	  cvmSet(m,1,0,-s_th);
	  cvmSet(m,2,0,c_th*c_ph);
	  cvNormalize( m, m);

	  float xreal=(*It)->wx +cvmGet(m,0,0)/(*It)->rho;
	  float yreal=(*It)->wy +cvmGet(m,1,0)/(*It)->rho;
	  float zreal=(*It)->wz +cvmGet(m,2,0)/(*It)->rho;


	  cvZero(jtemp);
	  cvmSet(jtemp,0,0,1);
	  cvmSet(jtemp,1,1,1);
	  cvmSet(jtemp,2,2,1);

	  cvmSet(jtemp,0,3,(-s_th*s_ph)/(*It)->rho);
	  cvmSet(jtemp,1,3,(-c_th     )/(*It)->rho);
	  cvmSet(jtemp,2,3,(-s_th*c_ph)/(*It)->rho);

	  cvmSet(jtemp,0,4,(c_th*c_ph)/(*It)->rho);
	  cvmSet(jtemp,1,4,(0    )/(*It)->rho);
	  cvmSet(jtemp,2,4,(-c_th*s_ph)/(*It)->rho);

	  cvmSet(jtemp,0,5,(-c_th*s_ph)/((*It)->rho*(*It)->rho));
	  cvmSet(jtemp,1,5,(s_th     )/((*It)->rho*(*It)->rho));
	  cvmSet(jtemp,2,5,(-c_th*c_ph)/((*It)->rho*(*It)->rho));

	  if (12+ii*6< pEstimator->pCovMat->width && 12+ii*6< pEstimator->pCovMat->height)
	    {

	      cvGetSubRect( pEstimator->pCovMat,temp,cvRect(12+ii*6,12+ii*6,6,6) );
	      cvMatMul(jtemp,temp,temp2);
	      cvGEMM( temp2,jtemp,1,NULL,0,temp3,CV_GEMM_B_T );

	      RFile<<"p"<<ii<< " <- matrix(c(" ;
	      for (int i=0; i<2 ; i++)
		for (int j=0; j<3;j++)
		  {
		    RFile<<cvmGet(temp3,i,j)<<",";
		  }
	      RFile<<cvmGet(temp3,2,0)<<",";
	      RFile<<cvmGet(temp3,2,1)<<",";
	      RFile<<cvmGet(temp3,2,2);
	      RFile<<"),3,3)"<<endl;
	      RFile<<"pos <- c("<<xreal<<", ";
	      RFile<<yreal<<", ";
	      RFile<<zreal<<")"<<endl;
	      RFile<<"try(plot3d( ellipse3d(p"<<ii<<",centre=";
	      RFile<<"pos), col=\"blue\", alpha=0.5, add = TRUE) )"<<endl;
	    }
	}
      ii++;
    }
  RFile<<"p"<<ii<< " <- matrix(c(" ;
  for (int i=0; i<2 ; i++)
    for (int j=0; j<3;j++)
      {
	RFile<<cvmGet(pEstimator->pCovMat,2*i,2*j)<<",";
      }
   RFile<<cvmGet(pEstimator->pCovMat,4,0)<<",";
   RFile<<cvmGet(pEstimator->pCovMat,4,2)<<",";
   RFile<<cvmGet(pEstimator->pCovMat,4,4);
   RFile<<"),3,3)"<<endl;
   RFile<<"pos <- c("<< cvmGet(pEstimator->pDataCam->translation,0,0)<<", ";
   RFile<< cvmGet(pEstimator->pDataCam->translation,1,0)<<", ";
   RFile<< cvmGet(pEstimator->pDataCam->translation,2,0)<<") "<<endl;
   RFile<<"plot3d( ellipse3d(p"<<ii<<",centre=";
   RFile<<"pos), col=\"red\", alpha=0.5, add = TRUE) "<<endl;
   RFile<<"rgl.viewpoint(45,30)"<<endl;
   RFile<<"rgl.snapshot(\"c:\\\\out"<<iter<<".png\")"<<endl;
   //RFile.close();

   cvReleaseMat(&temp);
   cvReleaseMat(&temp2);
   cvReleaseMat(&temp3);
   cvReleaseMat(&jtemp);

}
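
// The map points above use the inverse-depth parameterization
// (wx, wy, wz, theta, phi, rho): position = w + m(theta, phi) / rho.
// A sketch of that conversion (helper name hypothetical, requires <math.h>):
static void inverseDepthToXYZ(float wx, float wy, float wz,
                              float theta, float phi, float rho,
                              float *x, float *y, float *z)
{
    *x = wx + cos(theta) * sin(phi) / rho;
    *y = wy - sin(theta) / rho;
    *z = wz + cos(theta) * cos(phi) / rho;
}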
	const CvMat* construct(const PointPair& sample) const {
		cvmSet(A, 0, 0, sample.first.y - sample.second.y);
		cvmSet(A, 0, 1, -sample.first.x + sample.second.x);
		cvmSet(A, 0, 2, sample.first.x * sample.second.y - sample.first.y * sample.second.x);
		return A;
	}
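	// The row written into A above is the homogeneous line through the two
	// sample points: l = p1 x p2 with p = (x, y, 1), so a point p lies on
	// the fitted line exactly when dot(l, p) == 0.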
static int lua_predict( lua_State *L ){
  // Grab the user parameters
  double x = lua_tonumber(L, 1); // Current x
  double y = lua_tonumber(L, 2); // Current y
  double vx = lua_tonumber(L, 3); // Current x velocity
  double vy = lua_tonumber(L, 4); // Current y velocity
  double ep = lua_tonumber(L, 5); // Position uncertainty
  double evp = lua_tonumber(L, 6); // Velocity uncertainty
  double th = lua_tonumber(L, 7); // Current heading (theta)
  double vth = lua_tonumber(L, 8); // Angular velocity
  double px = lua_tonumber(L, 9);
  double py = lua_tonumber(L, 10);
  double pth = lua_tonumber(L, 11); // Heading of the (px, py) point

  double r = sqrt(x*x+y*y);
  double vr = sqrt(vx*vx+vy*vy);
  double pr = sqrt(px*px+py*py);

  // Set up the 5 parameter sample Matrix
  // Can I send an array to do this faster, though?
  // training_data = [obs_ep(range) obs_evp(range) obs_th(range) obs_pr(range) obs_pth(range) hit_range(range)];

  // training_data_dir = [obs_x(hit_range) obs_y(hit_range) obs_vx(hit_range) obs_vy(hit_range) obs_ep(hit_range) obs_evp(hit_range) obs_th(hit_range) obs_vth(hit_range) obs_r(hit_range) obs_vr(hit_range) obs_px(hit_range) obs_py(hit_range) obs_pr(hit_range) obs_pth(hit_range) th_diff(hit_range) dodge_dir];
  CvMat* sample = cvCreateMat( 1, 5, CV_32FC1 );
  cvmSet(sample,0,0,ep);
  cvmSet(sample,0,1,evp);
  cvmSet(sample,0,2,th);
  cvmSet(sample,0,3,pr);
  cvmSet(sample,0,4,pth);

  CvMat* sample2 = cvCreateMat( 1, 15, CV_32FC1 );
  cvmSet(sample2,0,0,x);
  cvmSet(sample2,0,1,y);
  cvmSet(sample2,0,2,vx);
  cvmSet(sample2,0,3,vy);
  cvmSet(sample2,0,4,ep);
  cvmSet(sample2,0,5,evp);
  cvmSet(sample2,0,6,th);
  cvmSet(sample2,0,7,vth);
  cvmSet(sample2,0,8,r);
  cvmSet(sample2,0,9,vr);
  cvmSet(sample2,0,10,px);
  cvmSet(sample2,0,11,py);
  cvmSet(sample2,0,12,pr);
  cvmSet(sample2,0,13,pth);
  cvmSet(sample2,0,14,pth-th);


  // Predict a move
  #if MODEL==1
  double p = boost->predict( sample );
  #elif MODEL==2
  double p = ddtree->predict( sample )->value;
  #elif MODEL==3
  double p = rtrees->predict( sample );
  #else
  double p = svm->predict( sample );
  #endif

  // Predict direction (only if we should dodge)
  double pdir = 0;
  if(p==1){
    pdir = ddtree_dir->predict( sample2 )->value;
    if( pdir == 0 )
      pdir = -1;
  }

  lua_pushnumber(L, p);
//  lua_pushnumber(L, pdir);

  // Free the sample that we created
  cvReleaseMat( &sample );
  cvReleaseMat( &sample2 );
  return 1;
}
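
// Exposing lua_predict to Lua scripts (a sketch; the module name "dodge" is
// hypothetical, Lua 5.1-style registration):
static const struct luaL_Reg predict_lib[] = {
  {"predict", lua_predict},
  {NULL, NULL}
};
// ...and inside the module's luaopen_* function:
// luaL_register(L, "dodge", predict_lib);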
Example #23
0
void steeringKernel(IplImage *image)
{
	int i, j, k, ii, jj;
	int width, height, step, channels;
	unsigned char *src;
	float *dest;

	height   = image->height;
	width    = image->width;
	step     = image->widthStep;
	channels = image->nChannels;
	src      = (unsigned char *)image->imageData;

	char *gx = (char *)malloc(sizeof(char) * width * height);
	char *gy = (char *)malloc(sizeof(char) * width * height);

	compute_xgrad(src, gx, width, height);
	compute_ygrad(src, gy, width, height);

	CvMat *covarMat   = cvCreateMat(2, 2, CV_32FC1);
	CvMat *spatialMat = cvCreateMat(1, 2, CV_32FC1); 
    CvMat *outputMat  = cvCreateMat(1, 2, CV_32FC1);

	float *covar      = covarMat->data.fl;
	float *spatial    = spatialMat->data.fl;
    int count = 0;

	dest = (float *)malloc(sizeof(float) * width * height);

	int xg[] = {-1, 0, 1,
				-2, 0, 2,
                -1, 0, 1};

	int yg[] = {-1, -2, -1,
				 0,  0,  0,
				 1,  2,  1};

	for(i = 1; i < height - 1; i++)
	{
		for(j = 1; j < width - 1; j++)
		{
			count = 0;
			cvmSet(spatialMat, 0, 0, 0);
			cvmSet(spatialMat, 0, 1, 0);

			for(ii = max(0,i-1); ii <= min(height-1, i+1); ii++)
			{
				for(jj = max(0,j-1); jj <= min(width-1,j+1); jj++)
				{
					cvmSet(spatialMat, 0, 0, cvmGet(spatialMat, 0, 0) + ((jj - j) * xg[count]));//cvmGet(spatialMat, 0, 0) + (jj - j));
					cvmSet(spatialMat, 0, 1, cvmGet(spatialMat, 0, 1) + ((ii - i) * yg[count]));//cvmGet(spatialMat, 0, 1) + (ii - i));
					count ++;
					//spatial[0] += jj - j;
					//spatial[1] += ii - i;					
				}
			}

			//printf("count: %d\n", count);
			cvmSet(covarMat, 0, 0, gx[i * width + j] * gx[i * width + j]);
			cvmSet(covarMat, 0, 1, gx[i * width + j] * gy[i * width + j]);
			cvmSet(covarMat, 1, 0, gx[i * width + j] * gy[i * width + j]);
			cvmSet(covarMat, 1, 1, gy[i * width + j] * gy[i * width + j]);

			cvMatMul(spatialMat, covarMat, outputMat);
			float *output = outputMat->data.fl;
			float dist = exp(-1 * sqrt(output[0] * output[0] +  output[1] * output[1])  / (2 *  0.008));
			//double det = cvDet(covarMat);
	
			//printf("covar: %f\n",  det);
			
		    dest[i * width + j] = dist; 		
		}
	}

	//computing weights

	float kernelSum;

	for(i = 1; i < height - 1; i++)
	{
		for(j = 1; j < width - 1; j++)
		{
			kernelSum = 0;

			for(ii = max(0,i-1); ii <= min(height-1, i+1); ii++)
			{
				for(jj = max(0,j-1); jj <= min(width-1,j+1); jj++)
				{
					//printf("%f\n", dest[ii * width + jj]);
					kernelSum += dest[ii * width + jj];					
				}
			}
			src[i * width + j] =  (unsigned char)(src[i * width + j] * (1 + (dest[i * width + j] / kernelSum)));
			//printf("weighted: %f\n", dest[i * width + j] / kernelSum);	
		}
	}
	
	//memcpy(dest, src, sizeof(unsigned char) * width * height);

	// release scratch buffers and matrices
	free(gx);
	free(gy);
	free(dest);
	cvReleaseMat(&covarMat);
	cvReleaseMat(&spatialMat);
	cvReleaseMat(&outputMat);
}
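
// The weight computed above follows a steering-kernel form,
// w = exp(-||s * C|| / (2 * 0.008)), where s is the Sobel-weighted spatial
// offset vector, C the per-pixel gradient covariance
// [gx*gx, gx*gy; gx*gy, gy*gy], and 0.008 a smoothing constant; each pixel is
// then boosted by its weight's share of the local 3x3 kernel sum.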
Example #24
0
int main( int argc, char** argv )
{

	IplImage* img1, * img2, * stacked;
	struct feature* feat1, * feat2, * feat;
	struct feature** nbrs;
	struct kd_node* kd_root;
	CvPoint pt1, pt2;
	double d0, d1;
	int n1, n2, k, i, m = 0;
	int match_cnt = 0;

	//CvMat *im1_mask = cvCreateMat( img1->height, img1->width, CV_64FC1 );
	//CvMat *im2_mask = cvCreateMat( img2->height, img2->width, CV_64FC1 );
  
	//cvSet( im1_mask, cvScalar( 1, 0, 0, 0 ), NULL );
	//cvSet( im2_mask, cvScalar( 1, 0, 0, 0 ), NULL );

	if( argc != 3 )
		fatal_error( "usage: %s <img1> <img2>", argv[0] );
  
	img1 = cvLoadImage( argv[1], 1 );
	if( ! img1 )
		fatal_error( "unable to load image from %s", argv[1] );
	img2 = cvLoadImage( argv[2], 1 );
	if( ! img2 )
		fatal_error( "unable to load image from %s", argv[2] );
	stacked = stack_imgs( img1, img2 );

	fprintf( stderr, "Finding features in %s...\n", argv[1] );
	n1 = sift_features( img1, &feat1 );
	fprintf( stderr, "Finding features in %s...\n", argv[2] );
	n2 = sift_features( img2, &feat2 );
	kd_root = kdtree_build( feat2, n2 );
	for( i = 0; i < n1; i++ )
    {
		feat = feat1 + i;
		k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
		if( k == 2 )
		{
			d0 = descr_dist_sq( feat, nbrs[0] );
			d1 = descr_dist_sq( feat, nbrs[1] );
			if( d0 < d1 * NN_SQ_DIST_RATIO_THR  )
			{
				if( m >= 2000 ) break;
				pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
				pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
				pt2.y += img1->height;
				cvLine( stacked, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 );
				m++;
				feat1[i].fwd_match = nbrs[0];
			 }
		}
		free( nbrs );
    }

	fprintf( stderr, "Found %d total matches\n", m );
	display_big_img( stacked, "Matches" );
	cvWaitKey( 0 );
	/*********************************************************************************************************/
     
	CvMat* H;
    IplImage* xformed;
    H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01, homog_xfer_err, 3.0, NULL, NULL );
	/* output H */
	printf("xform Homography Matrix is :\n"); 
	printMat( H );

	
	/* get the size of new image  */
	double XDATA[2];
	double YDATA[2];
	CvSize new_size;
	CvSize im1_size = cvGetSize( img1 );
	CvSize im2_size = cvGetSize( img2 );
		
	new_size = get_Stitched_Size( im1_size, im2_size, H, XDATA, YDATA );


	/*declare the mask*/
	CvMat *im1_mask = cvCreateMat( new_size.height, new_size.width, CV_64FC1 );
	CvMat *im2_mask = cvCreateMat( new_size.height, new_size.width, CV_64FC1 );

	CvMat *im1_tempMask = cvCreateMat( im1_size.height, im1_size.width, CV_64FC1 );
	CvMat *im2_tempMask = cvCreateMat( im2_size.height, im2_size.width, CV_64FC1 );
	cvSet( im1_tempMask, cvScalar(1,0,0,0), NULL );
	cvSet( im2_tempMask, cvScalar(1,0,0,0), NULL );

	/*  get translation Matrix for Aligning */
	CvMat *T = cvCreateMat( 3, 3, CV_64FC1 );
	double tx = 0;
	double ty = 0;
	if( XDATA[0] < 0 )
	{
		tx =  -XDATA[0] ;
		printf("tx = %f\n", tx );
	}
	if( YDATA[0] < 0 )
	{
		ty = -YDATA[0];
		printf("ty = %f\n", ty );
	}
	cvZero( T );	/* cvCreateMat leaves the matrix uninitialized */
	cvmSet( T, 0, 0, 1 );
	cvmSet( T, 0, 2, tx );
	cvmSet( T, 1, 1, 1 );
	cvmSet( T, 1, 2, ty );
	cvmSet( T, 2, 2, 1 );
	printf("T Matrix:\n");
	printMat( T );

	/* Transform and Align image2 */
	cvGEMM( T, H, 1, NULL, 0, H, 0 );
	printMat( H );
	xformed = cvCreateImage( new_size, IPL_DEPTH_8U, 3 );
	cvWarpPerspective( img1, xformed, H, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
	cvNamedWindow( "Xformed1", 1 );
	cvShowImage( "Xformed1", xformed );
	cvWaitKey( 0 );
	cvDestroyWindow("Xformed1");
	//cvSaveImage("im2.png", xformed);
	cvWarpPerspective( im1_tempMask, im1_mask, H, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
	//cvSaveImage("im1_mask.png", im1_mask);
	cvNamedWindow( "im1_mask", 1 );
	cvShowImage( "im1_mask", im1_mask );
	cvWaitKey( 0 );
	cvDestroyWindow("im1_mask");

	/* Align image1 to bound */
	cvWarpPerspective( im2_tempMask, im2_mask, T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
	//cvSaveImage( "im2_mask.png", im2_mask );
	cvNamedWindow( "im12_mask", 1 );
	cvShowImage( "im2_mask", im2_mask );
	cvWaitKey( 0 );
	cvDestroyWindow("im2_mask");
	cvSetImageROI( xformed, cvRect( tx, ty, img2->width, img2->height ) );
	cvCopy( img2, xformed, NULL );
	
	IplImage* huaijin = cvCreateImage( new_size, IPL_DEPTH_8U, 3 );
	cvWarpPerspective( img2, huaijin, T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
	cvNamedWindow( "im12_mask_i", 1 );
	cvShowImage( "im2_mask_i", huaijin);
	cvWaitKey( 0 );
	cvDestroyWindow("im2_mask_i");
	cvResetImageROI( xformed );
	//cvSaveImage( "re.png", xformed );


	/* composite */


	cvNamedWindow( "Xformed", 1 );
	cvShowImage( "Xformed", xformed );
	cvWaitKey( 0 );
	cvDestroyWindow("Xformed");
	/*  */


	cvReleaseImage( &xformed );
	cvReleaseMat( &H );
	cvReleaseMat( &im1_tempMask );
	cvReleaseMat( &im2_tempMask );
	cvReleaseMat( &T );
	cvReleaseMat ( &im1_mask );
	cvReleaseMat ( &im2_mask );
	cvReleaseImage( &stacked );
	cvReleaseImage( &img1 );
	cvReleaseImage( &img2 );
	kdtree_release( kd_root );
	free( feat1 );
	free( feat2 );

		
/****************************************************
 * use the RANSAC algorithm to get the Homography Matrix
 * get T image1-->image2
 * 
 * n_pts		the number of  points for estimating parameters  
 *
 *
 *
 * create unique indices of matches : get_randi( int j )
 *
 *
 * ******solve*********** 
 * 1.create the coefficient matrix
 * 2.Ah=0 --> decompose A = UDV^T using gsl 
 * 3 
 *      [ v19 v29 v39 ... v99 ]
 * h = -------------------------
 *               v99
 *
 * ***************************************************/
	/*
	CvMat *H1 = cvCreateMat( 3, 3, CV_64FC1 );
	CvMat *inliers_mask = cvCreateMat( m, 1, CV_64FC1 );
	RANSAC_Homography( m, pts1, pts2, H1, inliers_mask );
	printf("my code  Homography Matrix is :\n"); 
	for ( i = 0; i < H->rows; i++ ){
		for( k = 0; k < H->cols; k++ ){
			printf("%f	",cvmGet( H1, i, k ));
		}
		printf("\n");
	}


	cvReleaseMat( &H1 );
	cvReleaseMat( &inliers_mask );*/
/***********************************************
 * composite image1 & image2 
 * 1) transform image2 to image
 *
 * 2)***stitched image bounds****
 * W = max( [size(im1,2) size(im1,2)-XDATA(1) size(im2,2) size(im2,2)+XDATA(1)] );
 * H = max( [size(im1,1) size(im1,1)-YDATA(1) size(im2,1) size(im2,1)+YDATA(1)] );
 *
 * 3)*** Align image1 to bound ***
 *
 * 4)*** Align image2 to bound ***
 * 
 * 5)*** Check  size of bounds *** 
 * 
 * 6)*** combine both images ***
 *		im1_mask
 *		im2_mask
 *		im1_part_mask
 *		im2_part_mask
 *		com_part_mask
 *		stitched_image
 * 
 * 7)****copy im2 transformed to ROI of stitching plan ( just an idea )
 *
 ************************************************/

  return 0;
}

void compositImages( IplImage *im1, IplImage *im2, CvMat *H )
{
	/*  1.create a plan																			*/
	/*  2.transform im2 & im2_mask																*/
	/*	3.estimate translation of im2 --T  cp im1 -->plan with cvRect( tx, ty)					*/
	/*	4.cp transformed im2 to plan with im2_mask where im2_mask has the same size with plan	*/

}
Example #25
0
/*
Calculates a least-squares planar homography from point correspondences.

@param pts array of points
@param mpts array of corresponding points; each pts[i], i=0..n-1, corresponds
	to mpts[i]
@param n number of points in both pts and mpts; must be at least 4

@return Returns the 3 x 3 least-squares planar homography matrix that
	transforms points in pts to their corresponding points in mpts or NULL if
	fewer than 4 correspondences were provided
*/
CvMat* lsq_homog( CvPoint2D64f* pts, CvPoint2D64f* mpts, int n )
{
	CvMat* H, * A, * B, X;
	double x[9];
	int i;

	if( n < 4 )
	{
		fprintf( stderr, "Warning: too few points in lsq_homog(), %s line %d\n",
			__FILE__, __LINE__ );
		return NULL;
	}

	/* set up matrices so we can unstack homography into X; AX = B */
	A = cvCreateMat( 2*n, 8, CV_64FC1 );
	B = cvCreateMat( 2*n, 1, CV_64FC1 );
	X = cvMat( 8, 1, CV_64FC1, x );
	H = cvCreateMat(3, 3, CV_64FC1);
	cvZero( A );
	for( i = 0; i < n; i++ )
	{
		cvmSet( A, i, 0, pts[i].x );
		cvmSet( A, i+n, 3, pts[i].x );
		cvmSet( A, i, 1, pts[i].y );
		cvmSet( A, i+n, 4, pts[i].y );
		cvmSet( A, i, 2, 1.0 );
		cvmSet( A, i+n, 5, 1.0 );
		cvmSet( A, i, 6, -pts[i].x * mpts[i].x );
		cvmSet( A, i, 7, -pts[i].y * mpts[i].x );
		cvmSet( A, i+n, 6, -pts[i].x * mpts[i].y );
		cvmSet( A, i+n, 7, -pts[i].y * mpts[i].y );
		cvmSet( B, i, 0, mpts[i].x );
		cvmSet( B, i+n, 0, mpts[i].y );
	}
	cvSolve( A, B, &X, CV_SVD );
	x[8] = 1.0;
	X = cvMat( 3, 3, CV_64FC1, x );
	cvConvert( &X, H );

	cvReleaseMat( &A );
	cvReleaseMat( &B );
	return H;
}
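
/*
Applying the recovered homography to a single point (a sketch; the helper
name is hypothetical):
*/
static CvPoint2D64f apply_homog( const CvMat* H, CvPoint2D64f p )
{
	double x = cvmGet(H,0,0)*p.x + cvmGet(H,0,1)*p.y + cvmGet(H,0,2);
	double y = cvmGet(H,1,0)*p.x + cvmGet(H,1,1)*p.y + cvmGet(H,1,2);
	double w = cvmGet(H,2,0)*p.x + cvmGet(H,2,1)*p.y + cvmGet(H,2,2);
	return cvPoint2D64f( x / w, y / w );	/* perspective divide */
}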
Example #26
0
TargetObject::TargetObject(int idd, std::string namee, float xx, float yy, float zz, float vxx, float vyy, float vzz, int r, int g, int b, int inacTime):
	id(idd),
	name(namee),
	x(xx),
	y(yy),
	z (zz),
	vx(vxx),
	vz(vzz),
	vy(vyy),
	red(r),
	green(g),
	blue(b),
	//associated_to_cam_data(false),
	//associated_to_any_data(false),
	hue(NOF_HUEBINS/2.0),
	weight(-1),
	hueDataAvailable(false),
	currentHueValue(1.0),
	minMahaDist(999.),
	attentionalPriority(20. + ((float)rand()/(float)RAND_MAX)),
	sendx(xx),
	sendy(yy),
	sendz(zz),
	tapx(-0.5),
	tapy(0.0)
{

	predX = x;
	predY = y;

	weightReset = false;
	curWeight = 0.0;
	prevWeight = 0.0;
	speedVec.clear();
	speedVecx.clear();
	speedVecy.clear();
	meanSpeed = 0.0;
	meanSpeedx = 0.0;
	meanSpeedy = 0.0;
	computeSpecialID();

	//tempTimer.setActiveTime(); // this is testing timer..delete later
	//newHue = false;
	
	// set timer
	timer.setActiveTime();
	//std::std::cout << "targets previousActive time at creation = " << timer->previousActiveTime << std::std::endl;

	// initialize measurement matrix
	measurement = cvCreateMat( MES_SIZE, 1, CV_32FC1 );

	// initialize measurement_prediction matrix
	measurement_pred = cvCreateMat( MES_SIZE, 1, CV_32FC1 );

	// temporary matrices
	combined_innov = cvCreateMat( MES_SIZE, 1, CV_32FC1 );
	tempo1 = cvCreateMat( MES_SIZE, 1, CV_32FC1 );
	tempo2 = cvCreateMat( KALMAN_SIZE, 1, CV_32FC1 );
	tempoTrans1 = cvCreateMat( 1, MES_SIZE, CV_32FC1 );
	tempCov1 = cvCreateMat(MES_SIZE,KALMAN_SIZE,CV_32FC1);		
	tempCov2 = cvCreateMat(KALMAN_SIZE,MES_SIZE,CV_32FC1);		
	tempCov3 = cvCreateMat(MES_SIZE,MES_SIZE,CV_32FC1);
	tempCov4 = cvCreateMat(KALMAN_SIZE,KALMAN_SIZE,CV_32FC1);
	tempCov5 = cvCreateMat(KALMAN_SIZE,KALMAN_SIZE,CV_32FC1);
	tempCov6 = cvCreateMat(MES_SIZE,MES_SIZE,CV_32FC1);
	combInnovCov = cvCreateMat(MES_SIZE,MES_SIZE,CV_32FC1);

	/// extra error
	extra_error = cvCreateMat(KALMAN_SIZE,KALMAN_SIZE,CV_32FC1);

	// JPDA event probabilities
	event_probs = new std::vector<float>;

	all_innovs = new std::vector<CvMat*>; 


	all_measurements = new std::vector<high_dim_data>;
	// CvKalman* cvCreateKalman( int dynam_params, int measure_params, int control_params=0 );
	kalman  = cvCreateKalman( KALMAN_SIZE, MES_SIZE, CONTROL_SIZE );    // state is x,y,vx,vy and control vector is ax,ay (=acceleration)
	
	// measurement is x,y,z

	//std::cout << "kalman state size = " << kalman->DP << std::endl; 
	//std::cout << "kalman mes size = " << kalman->MP << std::endl;

	// set the current state
	CvMat* M = kalman->state_post;
	int   step  = M->step/sizeof(float);
	float *data = M->data.fl;

	(data+0*step)[0] = xx;
	(data+1*step)[0] = yy;
	(data+2*step)[0] = zz;
	(data+3*step)[0] = vxx;
	(data+4*step)[0] = vyy;
	(data+5*step)[0] = vzz;
	(data+6*step)[0] = 0.0; // acceleration
	(data+7*step)[0] = 0.0;
	(data+8*step)[0] = hue; // hue
	(data+9*step)[0] = weight; // weight




	/// set the prediction to the same as x, y (for validation gate)
	cvmSet(kalman->state_pre,0,0, 300.);
	cvmSet(kalman->state_pre,1,0, 300.);
	notAssociated = false;
	lostTrack = false;

	//cvSetIdentity( extra_error, cvRealScalar(60) ); /// 3500 until april 2008

	// initialize the Kalman vectors and matrices
	//const float A[] = { 1.0, 0.0, 0.0, 0.0, 1.0 ,0.0,0.0,0.0,1.0 }; // transition matrix
// 	const float A[] = { 	1.0, 0.0, 1.0, 0.0, 1.0, 0.0 ,
// 			    	0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
// 				0.0, 0.0, 1.0, 0.0, 1.0, 0.0,
// 				0.0, 0.0, 0.0, 1.0, 0.0, 1.0,
// 				0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
// 				0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }; // transition matrix

	//                      X,   Y,   Z,   VX,  VY,  VZ,  AX,  AY,  AZ,  HUE  WEI

	const float A[] = { 	1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
			    	0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,  0.0, 0.0, 1.0,
				}; // transition matrix 

	
	memcpy( kalman->transition_matrix->data.fl, A, sizeof(A));
	//printMatrix(kalman->transition_matrix, "transition matrix");
	///std::cout << "ok 1" << std::endl;
	// H = measurement matrix 
	//const float MES[] = { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 ,
	//		      0.0, 1.0, 0.0, 0.0, 0.0, 0.0 }; //measurement matrix

	const float MES[] = { 
	1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
	0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
	0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
	0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
	0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0
 				}; //


	memcpy( kalman->measurement_matrix->data.fl, MES, sizeof(MES));
        //printMatrix(kalman->measurement_matrix, "measurement matrix");

	

	/// process noise covariance matrix 

	const float B[] = {     0.01, 0.0, 0.0, 0.001, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0,
			    	0.0, 0.01, 0.0, 0.0, 0.001, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.001, 0.0, 0.0, 0.001, 0.0, 0.0, 0.001, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.001, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.0,
				0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,  0.0, 0.0, 0.01	 
				}; 

	///memcpy( kalman->process_noise_cov->data.fl, B, sizeof(B));
  	
	cvSetIdentity( kalman->process_noise_cov, cvRealScalar(1.0) );
        cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(0.01) );

	/// measurement noise covariance
	///cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(1.0));
       
	
	/// estimation error covariance (P tilde)
	cvSetIdentity( kalman->error_cov_post, cvRealScalar(1.0)); 
	estimationAdvanced = false;

	/// testing
	//std::cout << "target created: id, x,y,hue,weight = " << id << ", " << x << ", " << y << ", " << hue << ", " << weight << std::endl;

	/// for computation of the validation gate
	xaxis = 100.;
	yaxis = 100.;
	growth_factor = 0.1;

	
}
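
// Note on the transition matrix A above: it encodes a constant-acceleration
// model per axis with unit time step, x' = x + vx + ax, vx' = vx + ax,
// ax' = ax, while the two appearance states (hue, weight) propagate unchanged.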
Example #27
0
bool CamAugmentation::CreateHomographyRotationTranslationMatrix( int c ){
  int i;

  // Get pointer to homography:
  CvMat* m_homography = v_homography[c]->m_homography;

  if( m_homography ){

    // Vectors holding columns of H and R:
    double a_H1[3];
    CvMat  m_H1 = cvMat( 3, 1, CV_64FC1, a_H1 );
    for( i = 0; i < 3; i++ ) cvmSet( &m_H1, i, 0, cvmGet( m_homography, i, 0 ) );

    double a_H2[3];
    CvMat  m_H2 = cvMat( 3, 1, CV_64FC1, a_H2 );
    for( i = 0; i < 3; i++ ) cvmSet( &m_H2, i, 0, cvmGet( m_homography, i, 1 ) );

    double a_H3[3];
    CvMat  m_H3 = cvMat( 3, 1, CV_64FC1, a_H3 );
    for( i = 0; i < 3; i++ ) cvmSet( &m_H3, i, 0, cvmGet( m_homography, i, 2 ) );

    double a_CinvH1[3];
    CvMat  m_CinvH1 = cvMat( 3, 1, CV_64FC1, a_CinvH1 );

    double a_R1[3];
    CvMat  m_R1 = cvMat( 3, 1, CV_64FC1, a_R1 );

    double a_R2[3];
    CvMat  m_R2 = cvMat( 3, 1, CV_64FC1, a_R2 );

    double a_R3[3];
    CvMat  m_R3 = cvMat( 3, 1, CV_64FC1, a_R3 );

    // The rotation matrix:
    double a_R[9];
    CvMat  m_R = cvMat( 3, 3, CV_64FC1, a_R );

    // The translation vector:
    double a_T[3];
    CvMat  m_T = cvMat( 3, 1, CV_64FC1, a_T );

    ////////////////////////////////////////////////////////
    // Create inverse calibration matrix:
    CvMat* m_Cinv = InverseCalibrationMatrix( s_optimal.v_camera_c[c] );

    // Create norming factor lambda:
    cvGEMM( m_Cinv, &m_H1, 1, NULL, 0, &m_CinvH1, 0 );

    // Search next orthonormal matrix:
    if( cvNorm( &m_CinvH1, NULL, CV_L2, NULL ) != 0 ){
      double lambda = 1.00/cvNorm( &m_CinvH1, NULL, CV_L2, NULL );

      // Create normalized R1 & R2:
      cvGEMM( m_Cinv, &m_H1, lambda, NULL, 0, &m_R1, 0 );
      cvGEMM( m_Cinv, &m_H2, lambda, NULL, 0, &m_R2, 0 );

      // Get R3 orthonormal to R1 and R2:
      cvCrossProduct( &m_R1, &m_R2, &m_R3 );

      // Put the rotation column vectors in the rotation matrix:
      for( i = 0; i < 3; i++ ){
        cvmSet( &m_R, i, 0,  cvmGet( &m_R1, i, 0 ) );
        cvmSet( &m_R, i, 1,  cvmGet( &m_R2, i, 0 ) );
        cvmSet( &m_R, i, 2,  cvmGet( &m_R3, i, 0 ) );
      }

      // Calculate Translation Vector T (- because of its definition):
      cvGEMM( m_Cinv, &m_H3, -lambda, NULL, 0, &m_T, 0 );

      // Transformation of R into - in Frobenius sense - next orthonormal matrix:
      double a_W[9];	CvMat  m_W  = cvMat( 3, 3, CV_64FC1, a_W  );
      double a_U[9];	CvMat  m_U  = cvMat( 3, 3, CV_64FC1, a_U  );
      double a_Vt[9];	CvMat  m_Vt = cvMat( 3, 3, CV_64FC1, a_Vt );
      cvSVD( &m_R, &m_W, &m_U, &m_Vt, CV_SVD_MODIFY_A | CV_SVD_V_T );
      cvMatMul( &m_U, &m_Vt, &m_R );


      // Put the rotation matrix and the translation vector together:
      double a_view_to_cam[12];
      CvMat  m_view_to_cam = cvMat( 3, 4, CV_64FC1, a_view_to_cam );
      CamCalibration::ComposeRotationTranslationTo3x4Matrix( &m_view_to_cam, &m_R, &m_T );

      // Transfer to global reference coordinate system:
      double a_cam_inv[12];
      CvMat  m_cam_inv = cvMat( 3, 4, CV_64FC1, a_cam_inv );
      CamCalibration::Mat3x4Inverse( s_optimal.v_camera_r_t[c], &m_cam_inv );
      CamCalibration::Mat3x4Mul( &m_cam_inv, &m_view_to_cam, v_homography_r_t );

      return true;
    }
  }
  return false;
}
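
// Background for the decomposition above: for a planar scene the homography
// factors as H = C [r1 r2 t], so r1 = lambda*Cinv*h1 and r2 = lambda*Cinv*h2
// with lambda = 1/||Cinv*h1||, r3 = r1 x r2, and t = -lambda*Cinv*h3 (sign per
// the convention noted in the code); the SVD step then projects R onto the
// nearest true rotation in the Frobenius sense.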
Example #28
0
// stores this event for the target
void TargetObject::storeJPDAevent(float xx, float yy, float zz,  float huee, float weightt, float prob){
	
	//std::cout << "in store JPDA event: xx, yy, prob = " << xx << ", " << yy << ", " << huee << ", " << weightt << ", " << prob <<  std::endl;
	
	high_dim_data newData;
	newData.x = xx;
	newData.y = yy;
	newData.z = zz;

	

	if (!hueDataAvailable){
		newData.hue = hue;
		
	}
	else{

		if ((fabs(currentHueValue - hue) < 4.5) || (hue <= 0.0)){
			newData.hue = currentHueValue;
			// reduce attentional prioriy
			attentionalPriority = attentionalPriority / 2.0; //0. +
				// ((float)rand()/(float)RAND_MAX);
			if (attentionalPriority < 1.0){attentionalPriority = ((float)rand()/(float)RAND_MAX); }
		}
		else{
			std::cout << "------------------------> hue diff = " << abs(currentHueValue - hue) << ", hue = " << hue <<  std::endl;
			newData.hue = hue + (currentHueValue - hue)/2.0;
			
			hueDataAvailable = false;
		}
		
	}
	newData.weight = weightt;
	all_measurements->push_back(newData);
	
	
	event_probs->push_back(prob);

	/// compute the innovation
	CvMat* tempo3 = cvCreateMat( MES_SIZE, 1, CV_32FC1 );
	cvmSet( tempo1, 0, 0, xx);
	cvmSet( tempo1, 1, 0, yy);
	cvmSet( tempo1, 2, 0, zz);
	cvmSet( tempo1, 3, 0, huee);
	cvmSet( tempo1, 4, 0, weightt);

	cvmSet( measurement_pred, 0, 0,  cvmGet(kalman->state_pre,0,0));   // x
	cvmSet( measurement_pred, 1, 0,  cvmGet(kalman->state_pre,1,0));   // y
	cvmSet( measurement_pred, 2, 0,  cvmGet(kalman->state_pre,2,0));   // z
	cvmSet( measurement_pred, 3, 0,  cvmGet(kalman->state_pre,9,0));   // hue
	cvmSet( measurement_pred, 4, 0,  cvmGet(kalman->state_pre,10,0));  // weight

	cvSub(tempo1, measurement_pred, tempo3, 0);
	
	///printMatrix(tempo1, "target tempo 1: measurement");
	

	///printMatrix(kalman->state_pre, "kalman->state_pre");

	/// now scale this with the prob 
	///cvConvertScale( tempo3, tempo3, prob, 0 );
	///printMatrix(tempo3, "temppo 3 innovation");

	/// insert this innovation into all_innovs
	all_innovs->push_back(tempo3);

	/// update the combined innovation
	//std::cout << "------------------------> prob = " << prob << std::endl;
	CvScalar prob_scal = cvScalarAll(prob);
// 	std::cout << "targobj: before cvScaleAdd" << std::endl;
// 	std::cout << "tempo3: " << tempo3->rows << "," << tempo3->cols << std::endl;
// 	
// 	std::cout << "combined_innov: " << combined_innov->rows << "," << combined_innov->cols << std::endl;
// 	std::cout << "tempo1: " << tempo1-> rows << "," << tempo1->cols << std::endl;

	cvScaleAdd( tempo3, prob_scal, combined_innov, tempo1);
	//std::cout << "targobj: after cvScaleAdd" << std::endl;
	cvCopy(tempo1, combined_innov, 0);	

	///printMatrix(combined_innov, "combined_innov...1");
}
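
// JPDA note: storeJPDAevent accumulates the probability-weighted sum of
// innovations, combined_innov = sum_i beta_i * nu_i, which the combined
// Kalman update then uses in place of a single measurement residual.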
Example #29
0
//============================================================================
void AAM_Basic::EstPoseGradientMatrix(CvMat* GPose, 
									  const CvMat* CParams,
									  const std::vector<AAM_Shape>& AllShapes,
									  const std::vector<IplImage*>& AllImages, 
									  const CvMat* vPoseDisps)
{
	int nExperiment = 0;
	int ntotalExp = AllShapes.size()*vPoseDisps->rows/2*vPoseDisps->cols;
    int npixels = __cam.__texture.nPixels();
	int npointsby2 = __cam.__shape.nPoints()*2;
	int smodes = __cam.__shape.nModes();
	CvMat c;									//appearance parameters
	CvMat* q = cvCreateMat(1, 4, CV_64FC1);		//pose parameters
	CvMat* q1 = cvCreateMat(1, 4, CV_64FC1);
	CvMat* q2 = cvCreateMat(1, 4, CV_64FC1);
	CvMat* p = cvCreateMat(1, smodes, CV_64FC1);//shape parameters
	CvMat* s1 = cvCreateMat(1, npointsby2, CV_64FC1);//shape vector
    CvMat* s2 = cvCreateMat(1, npointsby2, CV_64FC1);
    CvMat* t1 = cvCreateMat(1, npixels, CV_64FC1);//texture vector
    CvMat* t2 = cvCreateMat(1, npixels, CV_64FC1);
	CvMat* delta_g1 = cvCreateMat(1, npixels, CV_64FC1);
	CvMat* delta_g2 = cvCreateMat(1, npixels, CV_64FC1);
	CvMat* cDiff = cvCreateMat(1, npixels, CV_64FC1);
    std::vector<double> normFactors; normFactors.resize(4);
	CvMat* AbsPoseDisps = cvCreateMat(vPoseDisps->rows, vPoseDisps->cols, CV_64FC1);

	// for each training example in the training set
	for(int i = 0; i < AllShapes.size(); i++)
	{
		cvGetRow(CParams, &c, i);

		//calculate pose parameters
		AllShapes[i].Point2Mat(s1);
		__cam.__shape.CalcParams(s1, p, q);
		
		cvCopy(vPoseDisps, AbsPoseDisps);
		int w = AllShapes[i].GetWidth();
		int h = AllShapes[i].GetHeight();
		int W = AllImages[i]->width;
		int H = AllImages[i]->height;

		for(int j = 0; j < vPoseDisps->rows; j+=2)
		{
			//translate relative translation to abs translation about x & y
			CV_MAT_ELEM(*AbsPoseDisps, double, j, 2) *= w;
			CV_MAT_ELEM(*AbsPoseDisps, double, j, 3) *= h;
			CV_MAT_ELEM(*AbsPoseDisps, double, j+1, 2) *= w;
			CV_MAT_ELEM(*AbsPoseDisps, double, j+1, 3) *= h;

			for(int k = 0; k < vPoseDisps->cols; k++)
			{
				printf("Performing (%d/%d)\r", nExperiment++, ntotalExp);

				//shift current pose parameters 
				cvCopy(q, q1);
                cvCopy(q, q2);
				if(k == 0 || k == 1){
					double scale, theta, dscale1, dtheta1, dscale2, dtheta2;
					scale = MAG(cvmGet(q,0,0)+1,cvmGet(q,0,1));
					theta = THETA(cvmGet(q,0,0)+1,cvmGet(q,0,1));
					dscale1 = MAG(cvmGet(AbsPoseDisps,j,0)+1,cvmGet(AbsPoseDisps,j,1));
					dtheta1 = THETA(cvmGet(AbsPoseDisps,j,0)+1,cvmGet(AbsPoseDisps,j,1));
					dscale2 = MAG(cvmGet(AbsPoseDisps,j+1,0)+1,cvmGet(AbsPoseDisps,j+1,1));
					dtheta2 = THETA(cvmGet(AbsPoseDisps,j+1,0)+1,cvmGet(AbsPoseDisps,j+1,1));

					dscale1 = dscale1*scale;		dtheta1 += theta;
					dscale2 = dscale2*scale;		dtheta2 += theta;
					
					if(k == 0){
						cvmSet(q1,0,0,dscale1*cos(theta)-1);
						cvmSet(q1,0,1,dscale1*sin(theta));
						cvmSet(q2,0,0,dscale2*cos(theta)-1);
						cvmSet(q2,0,1,dscale2*sin(theta));
					}
					
					else{
						cvmSet(q1,0,0,scale*cos(dtheta1)-1);
						cvmSet(q1,0,1,scale*sin(dtheta1));
						cvmSet(q2,0,0,scale*cos(dtheta2)-1);
						cvmSet(q2,0,1,scale*sin(dtheta2));				
					}
				}
				
				else
				{
					cvmSet(q1,0,k,cvmGet(q,0,k)+cvmGet(AbsPoseDisps,j,k));
					cvmSet(q2,0,k,cvmGet(q,0,k)+cvmGet(AbsPoseDisps,j+1,k));
				}

				//generate model texture
				__cam.CalcTexture(t1, &c);
				__cam.CalcTexture(t2, &c);
//				{
//					char filename[100];
//					sprintf(filename, "a/%d.jpg", nExperiment);
//					__cam.__paw.SaveWarpImageFromVector(filename, t1);
//				}

				//generate model shape
				//__cam.__shape.CalcShape(p, q1, s1);
				//__cam.__shape.CalcShape(p, q2, s2);
				__cam.CalcLocalShape(s1, &c); __cam.__shape.CalcGlobalShape(q1,s1);
				__cam.CalcLocalShape(s2, &c); __cam.__shape.CalcGlobalShape(q2,s2);

				//sample the shape to get warped texture
				if(!AAM_Basic::IsShapeWithinImage(s1, W, H)) cvZero(delta_g1);
				else __cam.__paw.FasterGetWarpTextureFromMatShape(s1, AllImages[i],	delta_g1, true);
				__cam.__texture.AlignTextureToRef(__cam.__MeanG, delta_g1);
				
				if(!AAM_Basic::IsShapeWithinImage(s2, W, H)) cvZero(delta_g2);
				else __cam.__paw.FasterGetWarpTextureFromMatShape(s2, AllImages[i], delta_g2, true);
				__cam.__texture.AlignTextureToRef(__cam.__MeanG, delta_g2);
		
//				{
//					char filename[100];
//					sprintf(filename, "a/%d-.jpg", nExperiment);
//					__cam.__paw.SaveWarpImageFromVector(filename, delta_g1);
//					sprintf(filename, "a/%d+.jpg", nExperiment);
//					__cam.__paw.SaveWarpImageFromVector(filename, delta_g2);
//				}
				
				//calculate pixel difference(g_s - g_m)
				cvSub(delta_g1, t1, delta_g1);
				cvSub(delta_g2, t2, delta_g2);
				
				//form central displacement
				cvSub(delta_g2, delta_g1, cDiff);
				cvConvertScale(cDiff, cDiff, 1.0/(cvmGet(AbsPoseDisps,j+1,k)-cvmGet(AbsPoseDisps,j,k)));

				//accumulate into k-th row
				CvMat Gk; cvGetRow(GPose, &Gk, k);
				cvAdd(&Gk, cDiff, &Gk);

				//increment normalisation factor
				normFactors[k] = normFactors[k]+1;
			}
		}
	}

	//normalize
	for(int j = 0; j < vPoseDisps->cols; j++)
	{
		CvMat Gj; cvGetRow(GPose, &Gj, j);
		cvConvertScale(&Gj, &Gj, 1.0/normFactors[j]);
	}

	cvReleaseMat(&s1); cvReleaseMat(&s2);
	cvReleaseMat(&t1); cvReleaseMat(&t2);
	cvReleaseMat(&delta_g1); cvReleaseMat(&delta_g2);
	cvReleaseMat(&cDiff);
	cvReleaseMat(&AbsPoseDisps);
}
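
// Each row k of GPose thus holds the centred finite difference
// dg/dq_k ~ (g(q2) - g(q1)) / (disp2 - disp1), accumulated over every
// training image and displacement pair and normalized by normFactors[k].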
CvMat *change(CvMat *original,int rti)
{
	CvMat *mat_k,*mat_k_inv,*mat_rt,*mat_inv_e;
	mat_rt = cvCreateMat(3,4,CV_64FC1);
	mat_inv_e = cvCreateMat(4,4,CV_64FC1);

	double _e[] = {-1,0,0,0,
		0,-1,0,0,
		0,0,-1,0,
		0,0,0,-1};	
	cvInitMatHeader(mat_inv_e,4,4,CV_64FC1,_e);

	mat_k =  cvCreateMat( 3, 3, CV_64FC1);

	double _k[] = {-800,0,599.5,0,800,399.5,0,0,1};
	cvInitMatHeader( mat_k, 3, 3, CV_64FC1, _k);



	mat_k_inv = cvCreateMat( 3, 3, CV_64FC1);
	cvInvert(mat_k, mat_k_inv, CV_SVD);




	cvmSet(mat_k,0,0,800);



	cvMatMulAdd( mat_k_inv, original, 0, mat_rt);

	Print_Mat(mat_rt);


	cvMatMulAdd( mat_rt, mat_inv_e, 0, mat_rt);


	Print_Mat(mat_rt);





	//if (rti == 1)
	//{
	//	CvMat* ad, *mul ,*e ,*inv_rt ,*rt_change , *rt_e;
	//	ad = cvCreateMat(3,4,CV_64FC1);
	//	mul = cvCreateMat(4,4,CV_64FC1);
	//	e = cvCreateMat(4,4,CV_64FC1);
	//	inv_rt = cvCreateMat(4,3,CV_64FC1);
	//	rt_change = cvCreateMat(4,4,CV_64FC1);
	//	rt_e = cvCreateMat(3,4,CV_64FC1);

	//	double _add[] = {0,0,0,0.12,
	//		0,0,0,-0.43,
	//		0,0,0,-0.17};
	//	cvInitMatHeader(ad,3,4,CV_64FC1,_add);

	//	double _rt_e[] = {1,0,0,0,
	//	                  0,1,0,0,
	//	                  0,0,1,0};

	//	cvInitMatHeader(rt_e,3,4,CV_64FC1,_rt_e);


	//	double _e[] ={1,0,0,0,
	//	              0,1,0,0,
	//	              0,0,1,0,
	//	              0,0,0,1};

	//	cvInitMatHeader(e,4,4,CV_64FC1,_e);

	//	cvMatMulAdd(mat_rt,e,ad,mat_rt);

	//	cout<<"mat_rt:";

	//	Print_Mat(mat_rt);


	//	cvInvert(mat_rt, inv_rt, CV_SVD);

	//	cout<<"inv_rt:";

	//	Print_Mat(inv_rt);


	//	cvMatMulAdd(inv_rt,rt_e,0,rt_change);

	//	cout<<"rt_change:";
	//	Print_Mat(rt_change);

	//    CvMat *test;
	//	test = cvCreateMat(3,4,CV_64FC1);

	//	cvMatMulAdd(mat_rt,rt_change,0,test);

	//	cout<<"test:";
	//	Print_Mat(test);


	//}
	//CvMat *rt_change,*rt_add,*rt_e;
	//rt_change = cvCreateMat(4,4,CV_64FC1);
	//rt_add = cvCreateMat(3,4,CV_64FC1);
	//rt_e = cvCreateMat(4,4,CV_64FC1);

	//double _change[] = {1.64,-0.24,-0.52,0,
	//                    0.27,1.59,-0.02,0,
	//                    -0.1,0.42,1.08,0,
	//                    0,-0.01,0,0};

	//cvInitMatHeader(rt_change,4,4,CV_64FC1,_change);

	//double _add[] = {0,0,0,0.12,
	//			     0,0,0,-0.43,
	//			     0,0,0,-0.17};
	//cvInitMatHeader(rt_add,3,4,CV_64FC1,_add);

	//double _rt_e[] ={1,0,0,0,
	//	0,1,0,0,
	//	0,0,1,0,
	//	0,0,0,1};

	//cvInitMatHeader(rt_e,4,4,CV_64FC1,_rt_e);

	//cvMatMulAdd(mat_rt,rt_e,rt_add,mat_rt);

	//cvMatMulAdd(mat_rt,rt_change,0,mat_rt);

	//Print_Mat(mat_rt);

	cvMatMulAdd(mat_k,mat_rt,0,original);


	return original;

}