Example #1
#include <stdio.h>
#include <cxcore.h>	/* OpenCV 1.x C API; cvCGSolve and icvMatMulOps are defined elsewhere in this project */

int main()
{
	CvMat* A = cvCreateMat( 3, 3, CV_64FC1 );
	CvMat* B = cvCreateMat( 3, 1, CV_64FC1 );
	CvMat* X = cvCreateMat( 3, 1, CV_64FC1 );
	A->data.db[0] = 11;
	A->data.db[1] = 5;
	A->data.db[2] = 15;
	A->data.db[3] = 5;
	A->data.db[4] = 3;
	A->data.db[5] = 1;
	A->data.db[6] = 15;
	A->data.db[7] = 1;
	A->data.db[8] = 31;
	B->data.db[0] = 1;
	B->data.db[1] = 2;
	B->data.db[2] = 3;
	printf("A :\n[%lf, %lf, %lf]\n[%lf, %lf, %lf]\n[%lf, %lf, %lf]\n", A->data.db[0], A->data.db[1], A->data.db[2], A->data.db[3], A->data.db[4], A->data.db[5], A->data.db[6], A->data.db[7], A->data.db[8]);
	cvCGSolve( icvMatMulOps, A, B, X );
	printf("x : [%lf, %lf, %lf]\n", X->data.db[0], X->data.db[1], X->data.db[2]);
	cvMatMul( A, X, X );
	printf("Ax : [%lf, %lf, %lf]\n", X->data.db[0], X->data.db[1], X->data.db[2]);
	cvSolve( A, B, X );
	printf("x : [%lf, %lf, %lf]\n", X->data.db[0], X->data.db[1], X->data.db[2]);
	cvMatMul( A, X, X );
	printf("Ax : [%lf, %lf, %lf]\n", X->data.db[0], X->data.db[1], X->data.db[2]);
	return 0;
}
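As a quick sanity check on either solver, the residual can be computed explicitly; a minimal sketch, reusing the matrix names from Example #1 above:

	/* Sketch: report ||A*x - b|| after a solve. */
	CvMat* Ax = cvCreateMat( 3, 1, CV_64FC1 );
	cvMatMul( A, X, Ax );   /* Ax = A*x, written to a separate matrix */
	cvSub( Ax, B, Ax );     /* Ax = A*x - b */
	printf( "residual: %lf\n", cvNorm( Ax, NULL, CV_L2, NULL ) );
	cvReleaseMat( &Ax );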
Example #2
void Kalman::predict_P() {
	// P_pred = F*P*trans(F) + Q
	cvTranspose(F, F_trans);
	cvMatMul(P, F_trans, P_pred);
	cvMatMul(F, P_pred, P_pred);
	cvScaleAdd(P_pred, cvScalar(1), Q, P_pred);
}
Example #3
void KalmanSensor::update_P(CvMat *P_pred, CvMat *P) {
	//P = (I - K*H) * P_pred
	cvMatMul(K, H, P_tmp);
	cvSetIdentity(P);
	cvScaleAdd(P_tmp, cvScalar(-1), P, P);
	cvMatMul(P, P_pred, P);
}
Example #4
void KalmanSensorCore::update_x(CvMat *x_pred, CvMat *x) {
	// x = x_pred + K * (z - H*x_pred)
	cvMatMul(H, x_pred, z_pred);
	cvScaleAdd(z_pred, cvScalar(-1), z, z_residual);
	cvMatMul(K, z_residual, x_gain);
	cvScaleAdd(x_pred, cvScalar(1), x_gain, x);
}
Example #5
CvMat* AbstractCamera::computeRtMatrix(double a, double b, double g, double tX, double tY, double tZ) {

	//--- Represent 3d rotation with euler angles

	double sinG = sin(g);
	double cosG = cos(g);
	CvMat* rZg = cvCreateMat(3, 3, CV_32FC1);
	cvSetZero(rZg);
	cvmSet(rZg, 0, 0, cosG);
	cvmSet(rZg, 0, 1, -sinG);
	cvmSet(rZg, 1, 0, sinG);
	cvmSet(rZg, 1, 1, cosG);
	cvmSet(rZg, 2, 2, 1.0f);

	double sinB = sin(b);
	double cosB = cos(b);
	CvMat* rXb = cvCreateMat(3, 3, CV_32FC1);
	cvSetZero(rXb);
	cvmSet(rXb, 0, 0, 1.0f);
	cvmSet(rXb, 1, 1, cosB);
	cvmSet(rXb, 1, 2, -sinB);
	cvmSet(rXb, 2, 1, sinB);
	cvmSet(rXb, 2, 2, cosB);

	double sinA = sin(a);
	double cosA = cos(a);
	CvMat* rZa = cvCreateMat(3, 3, CV_32FC1);
	cvSetZero(rZa);
	cvmSet(rZa, 0, 0, cosA);
	cvmSet(rZa, 0, 1, -sinA);
	cvmSet(rZa, 1, 0, sinA);
	cvmSet(rZa, 1, 1, cosA);
	cvmSet(rZa, 2, 2, 1.0f);

	CvMat* rotationMatrix = cvCreateMat(3, 3, CV_32FC1);
	cvMatMul(rZg, rXb, rotationMatrix);
	cvMatMul(rotationMatrix, rZa, rotationMatrix);

	cvReleaseMat(&rZg);
	cvReleaseMat(&rXb);
	cvReleaseMat(&rZa);

	//--- [R|T] ; First camera rotation and translation matrix
	CvMat* rtMatrix = cvCreateMat(3, 4, CV_32FC1);
	for (int i = 0; i < 3; i++) {
		cvmSet(rtMatrix, i, 0, cvmGet(rotationMatrix, i, 0));
		cvmSet(rtMatrix, i, 1, cvmGet(rotationMatrix, i, 1));
		cvmSet(rtMatrix, i, 2, cvmGet(rotationMatrix, i, 2));
	}
	cvmSet(rtMatrix, 0, 3, tX);
	cvmSet(rtMatrix, 1, 3, tY);
	cvmSet(rtMatrix, 2, 3, tZ);

	cvReleaseMat(&rotationMatrix);

	return rtMatrix;
}
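The three factors above are elementary rotations composed as a Z-X-Z Euler sequence, with the translation column appended afterwards; in the usual notation:

$$R = R_z(g)\,R_x(b)\,R_z(a), \qquad [R\,|\,T] = \bigl[\,R \;\big|\; (t_X, t_Y, t_Z)^\top\,\bigr]$$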
Example #6
void KalmanSensor::update_K(CvMat *P_pred) {
	// K = P_pred * trans(H) * inv(H*P_pred*trans(H) + R)
	cvTranspose(H, H_trans);
	cvMatMul(P_pred, H_trans, K);
	cvMatMul(H, K, R_tmp);
	cvScaleAdd(R_tmp, cvScalar(1), R, R_tmp);
	cvInvert(R_tmp, R_tmp);
	cvMatMul(H_trans, R_tmp, K);
	cvMatMul(P_pred, K, K);
}
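Examples #2, #3, #4 and #6 together implement the textbook Kalman predict/update cycle; in conventional notation:

$$\begin{aligned} P^- &= F P F^\top + Q \\ K &= P^- H^\top \bigl(H P^- H^\top + R\bigr)^{-1} \\ x &= x^- + K\,(z - H x^-) \\ P &= (I - K H)\,P^- \end{aligned}$$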
Example #7
static void projectImg(IplImage *src, int64_t TRANS_X, int64_t TRANS_Y,
		IplImage *dst, CvMat *tmatrix) {
	if (tmatrix->rows == 2) {
		//translate
		CvMat* result = cvCreateMat(2, 3, CV_32FC1);
		cvSetReal2D(result, 0, 0, cvGetReal2D(tmatrix, 0, 0));
		cvSetReal2D(result, 0, 1, cvGetReal2D(tmatrix, 0, 1));
		cvSetReal2D(result, 1, 0, cvGetReal2D(tmatrix, 1, 0));
		cvSetReal2D(result, 1, 1, cvGetReal2D(tmatrix, 1, 1));
		cvSetReal2D(result, 0, 2, cvGetReal2D(tmatrix, 0, 2) + TRANS_X);
		cvSetReal2D(result, 1, 2, cvGetReal2D(tmatrix, 1, 2) + TRANS_Y);
		cvWarpAffine(src, dst, result, CV_INTER_LINEAR, cvScalarAll(0));
		cvReleaseMat(&result);
	} else if (tmatrix->rows == 3) {
		// translation matrix
		CvMat* offset = cvCreateMat(3, 3, CV_32FC1);
		cvSetReal2D(offset, 0, 0, 1);
		cvSetReal2D(offset, 0, 1, 0);
		cvSetReal2D(offset, 0, 2, TRANS_X);
		cvSetReal2D(offset, 1, 0, 0);
		cvSetReal2D(offset, 1, 1, 1);
		cvSetReal2D(offset, 1, 2, TRANS_Y);
		cvSetReal2D(offset, 2, 0, 0);
		cvSetReal2D(offset, 2, 1, 0);
		cvSetReal2D(offset, 2, 2, 1);
		//translate
		CvMat* result = cvCreateMat(3, 3, CV_32FC1);
		cvMatMul(offset, tmatrix, result);
		cvWarpPerspective(src, dst, result, CV_INTER_LINEAR, cvScalarAll(0));
		cvReleaseMat(&offset);
		cvReleaseMat(&result);
	}
}
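A hedged usage sketch for projectImg: shifting an image by (40, 25) pixels through an identity 2x3 affine matrix (the src and dst images are assumed to exist and have the same size):

	CvMat* eye = cvCreateMat(2, 3, CV_32FC1);
	cvSetZero(eye);
	cvSetReal2D(eye, 0, 0, 1);
	cvSetReal2D(eye, 1, 1, 1);
	projectImg(src, 40, 25, dst, eye);
	cvReleaseMat(&eye);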
Example #8
/**
 * Calculate: K P R|T
 */
CvMat* MultipleViewGeomOld::calculateProjectionMatrix(CvMat *rtMat) {


    // 3 rows, 4 columns
    CvMat* kpMat = cvCreateMat(3, 4, CV_32FC1);
    cvMatMul(kMat,pMat,kpMat);

    CvMat* projMat = cvCreateMat(3, 4, CV_32FC1);
    cvMatMul(kpMat,rtMat,projMat);
    cvReleaseMat(&kpMat);

    LOG4CPLUS_DEBUG(myLogger,"Projection K P R|T matrix" << endl << printCvMat(projMat));

    return projMat;
}
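Example #9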
void CModel::Draw(IplImage* img, CvMat* CamMat, CvScalar color, int thickness)
{
	int nEdges = Edges.size();
	int nVertexes = Vertexes.size();

	// Create the array of projected vertices
	vector<CvMat*> ProjectedVertex;

	ProjectedVertex.resize(Vertexes.size());
	for (int i = 0; i < nVertexes; i++)
		ProjectedVertex[i] = cvCreateMat(3,1,CV_32FC1);

	// Project each vertex
	for (int i = 0; i < nVertexes; i++)
	{
		cvMatMul(CamMat,Vertexes[i],ProjectedVertex[i]);
		CV_MAT_ELEM(*ProjectedVertex[i],float,0,0) = 
			CV_MAT_ELEM(*ProjectedVertex[i],float,0,0)/CV_MAT_ELEM(*ProjectedVertex[i],float,2,0);
		CV_MAT_ELEM(*ProjectedVertex[i],float,1,0) = 
			CV_MAT_ELEM(*ProjectedVertex[i],float,1,0)/CV_MAT_ELEM(*ProjectedVertex[i],float,2,0);
	}

	for (int i = 0; i < nEdges; i++)
	{
		cvLine(img, 
			cvPoint(CV_MAT_ELEM(*ProjectedVertex[Edges[i].idxFrm],float,0,0), CV_MAT_ELEM(*ProjectedVertex[Edges[i].idxFrm],float,1,0)),
			cvPoint(CV_MAT_ELEM(*ProjectedVertex[Edges[i].idxTo], float,0,0), CV_MAT_ELEM(*ProjectedVertex[Edges[i].idxTo], float,1,0)),
			color, thickness);
	}
	for (int i = 0; i < nVertexes; i++)
		cvReleaseMat(&ProjectedVertex[i]);
	ProjectedVertex.clear();
}
Example #10
Point2D getOptFlow(IplImage* currentFrame,Point2D p,IplImage* preFrame)
{
	Point2D temp;
	double b[2];
	b[0]=0;b[1]=0;
	
	double M11=0,M12=0,M22=0;
	for(int i = -OPTICAL_FLOW_POINT_AREA/2; i < OPTICAL_FLOW_POINT_AREA/2; i++)
	{
		for (int j = -OPTICAL_FLOW_POINT_AREA/2;j < OPTICAL_FLOW_POINT_AREA/2;j++)
		{
			temp = partial(currentFrame,Point2D(p.row+i,p.col+j));
			M11 += temp.dcol*temp.dcol;
			M12 += temp.dcol*temp.drow;
			M22 += temp.drow*temp.drow;
			b[0] += temp.dcol*(pixval8U(currentFrame,p.row+i,p.col+j)-pixval8U(preFrame,p.row+i,p.col+j));
			b[1] += temp.drow*(pixval8U(currentFrame,p.row+i,p.col+j)-pixval8U(preFrame,p.row+i,p.col+j));
		}
	}
	double a[] = {M11,M12,M12,M22};
	CvMat M=cvMat(2, 2, CV_64FC1, a);
	CvMat *Mi = cvCloneMat(&M);
	cvInvert(&M,Mi,CV_SVD);
	b[0] = -b[0];
	b[1] = -b[1];
	CvMat Mb = cvMat(2,1,CV_64FC1,b);
	CvMat *Mr = cvCloneMat(&Mb);
	cvMatMul( Mi, &Mb, Mr);
	double vy = (cvmGet(Mr,1,0));
	double vx = (cvmGet(Mr,0,0));
	cvReleaseMat(&Mi);
	cvReleaseMat(&Mr);

	return (Point2D(vy,vx));
}
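Example #10 is a single-point Lucas-Kanade step: with temp.dcol and temp.drow as the spatial derivatives $I_x, I_y$ and the inter-frame difference as $I_t$, it solves the 2x2 normal equations over the patch,

$$\begin{bmatrix}\sum I_x^2 & \sum I_x I_y\\ \sum I_x I_y & \sum I_y^2\end{bmatrix}\begin{bmatrix}v_x\\ v_y\end{bmatrix} = -\begin{bmatrix}\sum I_x I_t\\ \sum I_y I_t\end{bmatrix},$$

which is exactly the Mi times -b product computed above.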
Example #11
/** Computes the optical center of camera from camera projection matrix (http://en.wikipedia.org/wiki/Camera_matrix )
* @param pM : Camera projection matrix (3x4).
* @return : Optical center of camera.
*/
Point3D Utility::getOpticalCenter( CvMat* pM )
{
	CvMat *A = cvCreateMat(3, 3, CV_64FC1);
	CvMat *Ainv = cvCreateMat(3, 3, CV_64FC1);
	CvMat *b = cvCreateMat(3, 1, CV_64FC1);
	for(int i=0; i<3; ++i)
	{
		for(int j=0; j<3; ++j)
			cvmSet(A, i, j, cvmGet(pM,i,j));
		cvmSet(b, i, 0, cvmGet(pM, i,3));
	}
	cvInvert(A, Ainv);
	CvMat *oc = cvCreateMat(3, 1, CV_64FC1);
	cvMatMul(Ainv, b, oc);
	Point3D toRet;
	toRet.x = -1 * cvmGet(oc, 0, 0);				// camera centre C = -A^(-1) b (the null space of pM)
	toRet.y = -1 * cvmGet(oc, 1, 0);
	toRet.z = -1 * cvmGet(oc, 2, 0);

	cvReleaseMat(&A);
	cvReleaseMat(&Ainv);
	cvReleaseMat(&b);
	cvReleaseMat(&oc);
	return toRet;
}
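Writing the 3x4 projection matrix as pM = [A | b], the camera centre C satisfies pM (C, 1)^T = 0, so

$$C = -A^{-1} b,$$

which is what the code computes before negating the entries of oc.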
Example #12
/*! Transform the grid with given homography and average colors over
 * triangles.
 */
void LightCollector::averageImage(IplImage *im, CvMat *_homography)
{
	if (avgChannels != im->nChannels) {
		if (avgChannels < im->nChannels) { 
			delete[] avg;
			avg = 0;
		}
		avgChannels = im->nChannels;
	}
	if (!avg) avg = new float[avgChannels*nbTri];
	
	// apply the homography to every mesh vertex
	if (_homography)
		cvMatMul(_homography, vertices, transformed);
	else
		cvCopy(vertices, transformed);
	CvMat r1,r2,r3;
	cvGetRow(transformed, &r1, 0);
	cvGetRow(transformed, &r2, 1);
	cvGetRow(transformed, &r3, 2);
	cvDiv(&r1,&r3,&r1);
	cvDiv(&r2,&r3,&r2);
	
	nbPix=0;
	for (int t=0; t<nbTri;t++) {
		int pts[3][2];
		for (int i=0; i<3; i++) {
			assert(triangles[t*3+i] < transformed->cols);
			pts[i][0] = cvRound(CV_MAT_ELEM(*transformed, float, 0, triangles[t*3+i]));
			pts[i][1] = cvRound(CV_MAT_ELEM(*transformed, float, 1, triangles[t*3+i]));
		}
		nbPix+=stat_triangle(im, pts, avg+t*avgChannels);
	}
}
Example #13
int calculateWidthInPixels(CvMat* P, float Y){
  float W = 0.10; // half the road width, in meters (full width 20 cm ~ 0.2 m)
  float w = 0.0;  // road width in pixels

  CvMat tmp;
  //create P_1 (row 1 of matrix P)
  CvMat *P_1 = cvCreateMat(1,4,CV_32FC1);
  cvGetRow(P,&tmp,0); //row 0
  cvCopy(&tmp,P_1,NULL);

  CvMat *P_3 = cvCreateMat(1,4,CV_32FC1);
  cvGetRow(P,&tmp,2); //row 2
  cvCopy(&tmp,P_3,NULL);

  CvMat* X_1 = cvCreateMat(4,1,CV_32FC1);
  CvMat* X_2 = cvCreateMat(4,1,CV_32FC1);
  CvMat* P_1_times_X_1 = cvCreateMat(1,1,CV_32FC1);
  CvMat* P_3_times_X_1 = cvCreateMat(1,1,CV_32FC1);
  CvMat* P_1_times_X_2 = cvCreateMat(1,1,CV_32FC1);
  CvMat* P_3_times_X_2 = cvCreateMat(1,1,CV_32FC1);

  cvmSet(X_1,0,0,W);
  cvmSet(X_1,1,0,Y);
  cvmSet(X_1,2,0,0.0);
  cvmSet(X_1,3,0,1.0);

  cvmSet(X_2,0,0,0);
  cvmSet(X_2,1,0,Y);
  cvmSet(X_2,2,0,0);
  cvmSet(X_2,3,0,1);

  cvMatMul(P_1,X_1,P_1_times_X_1);
  cvMatMul(P_3,X_1,P_3_times_X_1);	
  cvMatMul(P_1,X_2,P_1_times_X_2);
  cvMatMul(P_3,X_2,P_3_times_X_2);	

  w = ((cvmGet(P_1_times_X_1,0,0) /
        cvmGet(P_3_times_X_1,0,0)
       ) 
      -
      (cvmGet(P_1_times_X_2,0,0) /
       cvmGet(P_3_times_X_2,0,0)
      ));

  cvReleaseMat(&P_1);
  cvReleaseMat(&P_3);
  cvReleaseMat(&X_1);
  cvReleaseMat(&X_2);
  cvReleaseMat(&P_1_times_X_1);
  cvReleaseMat(&P_3_times_X_1);
  cvReleaseMat(&P_1_times_X_2);
  cvReleaseMat(&P_3_times_X_2);

  return int(w+0.5);
}
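Example #14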
FLOAT_POINT2D mcvGetVanishingPoint(const CameraInfo *cameraInfo)
{
    //get the vp in world coordinates
    FLOAT_MAT_ELEM_TYPE vpp[] = {sin(cameraInfo->yaw)/cos(cameraInfo->pitch),
                                 cos(cameraInfo->yaw)/cos(cameraInfo->pitch), 0};
    CvMat vp = cvMat(3, 1, FLOAT_MAT_TYPE, vpp);

    //transform from world to camera coordinates
    //
    //rotation matrix for yaw
    FLOAT_MAT_ELEM_TYPE tyawp[] = {cos(cameraInfo->yaw), -sin(cameraInfo->yaw), 0,
                                   sin(cameraInfo->yaw), cos(cameraInfo->yaw), 0,
                                   0, 0, 1};
    CvMat tyaw = cvMat(3, 3, FLOAT_MAT_TYPE, tyawp);
    //rotation matrix for pitch
    FLOAT_MAT_ELEM_TYPE tpitchp[] = {1, 0, 0,
                                     0, -sin(cameraInfo->pitch), -cos(cameraInfo->pitch),
                                     0, cos(cameraInfo->pitch), -sin(cameraInfo->pitch)};
    CvMat transform = cvMat(3, 3, FLOAT_MAT_TYPE, tpitchp);
    //combined transform
    cvMatMul(&transform, &tyaw, &transform);

    //
    //transformation from (xc, yc) in camera coordinates
    // to (u,v) in image frame
    //
    //matrix to shift optical center and focal length
    FLOAT_MAT_ELEM_TYPE t1p[] = {
        cameraInfo->focalLength.x, 0, cameraInfo->opticalCenter.x,
        0, cameraInfo->focalLength.y, cameraInfo->opticalCenter.y,
        0, 0, 1};
    CvMat t1 = cvMat(3, 3, FLOAT_MAT_TYPE, t1p);
    //combine transform
    cvMatMul(&t1, &transform, &transform);
    //transform
    cvMatMul(&transform, &vp, &vp);

    //
    //clean and return
    //
    FLOAT_POINT2D ret;
    ret.x = cvGetReal1D(&vp, 0);
    ret.y = cvGetReal1D(&vp, 1);
    return ret;
}
Example #15
//============================================================================
void AAM_Basic::CalcGradientMatrix(const CvMat* CParams, 
								   const CvMat* vCDisps,
								   const CvMat* vPoseDisps,
								   const std::vector<AAM_Shape>& AllShapes, 
								   const std::vector<IplImage*>& AllImages)
{
	int npixels = __cam.__texture.nPixels();
	int np = __cam.nModes();

	// do model parameter experiments
    {
		printf("Calculating parameter gradient matrix...\n");
		CvMat* GParam = cvCreateMat(np, npixels, CV_64FC1);
		cvZero(GParam);
        CvMat* GtG = cvCreateMat(np, np, CV_64FC1);
        CvMat* GtGInv = cvCreateMat(np, np, CV_64FC1);
        
		// estimate Rc
		EstCParamGradientMatrix(GParam, CParams, AllShapes, AllImages, vCDisps);
        __Rc = cvCreateMat(np, npixels, CV_64FC1);
        cvGEMM(GParam, GParam, 1, NULL, 0, GtG, CV_GEMM_B_T);
        cvInvert(GtG, GtGInv, CV_SVD );
		cvMatMul(GtGInv, GParam, __Rc);
        
		cvReleaseMat(&GtG);
		cvReleaseMat(&GtGInv);
		cvReleaseMat(&GParam);
    }

	// do pose experiments, this is for global shape normalization
    {
		printf("Calculating pose gradient matrix...\n");
		CvMat* GtG = cvCreateMat(4, 4, CV_64FC1);
        CvMat* GtGInv = cvCreateMat(4, 4, CV_64FC1);
        CvMat* GPose = cvCreateMat(4, npixels, CV_64FC1);
        cvZero(GPose);

        // estimate Rt
        EstPoseGradientMatrix(GPose, CParams, AllShapes, AllImages, vPoseDisps);
        __Rq = cvCreateMat(4, npixels, CV_64FC1);
        cvGEMM(GPose, GPose, 1, NULL, 0, GtG, CV_GEMM_B_T);
        cvInvert(GtG, GtGInv, CV_SVD);
		cvMatMul(GtGInv, GPose, __Rq);
        
		cvReleaseMat(&GtG);
		cvReleaseMat(&GtGInv);
		cvReleaseMat(&GPose);
    }
}
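In both blocks the update matrix is the least-squares pseudo-inverse of the experimentally estimated gradient matrix G:

$$R = (G G^\top)^{-1} G,$$

computed here with cvGEMM (for $G G^\top$), cvInvert with CV_SVD, and a final cvMatMul.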
Example #16
CvMat* camcalib::matMul(const CvMat* A, const CvMat* B) 
{
  assert(A->cols == B->rows);

  CvMat* M = cvCreateMat(A->rows, B->cols, A->type);
  cvMatMul(A, B, M);
  return M;
}
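The wrapper allocates the product with cvCreateMat, so ownership passes to the caller; a hedged usage sketch (the camcalib instance and input matrices are assumptions here):

  CvMat* M = calib.matMul(A, B);  /* M is A->rows x B->cols */
  /* ... use M ... */
  cvReleaseMat(&M);               /* the caller must release the result */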
Example #17
	void BazARTracker::show_result(CamAugmentation &augment, IplImage *video, IplImage **dst)
	{
		if (getDebugMode()){
			if (*dst==0) *dst=cvCloneImage(video);
			else cvCopy(video, *dst);
		}

		CvMat *m  = augment.GetProjectionMatrix(0);
		// Flip the y axis (this comes from the OpenGL vs. camera origin mismatch)
		CvMat *coordinateTrans = cvCreateMat(3, 3, CV_64F);
		cvmSetIdentity(coordinateTrans);
		cvmSet(coordinateTrans, 1, 1, -1);
		cvmSet(coordinateTrans, 1, 2, m_cparam->cparam.ysize);
		cvMatMul(coordinateTrans, m, m);
	
		// extract intrinsic camera parameters from bazar's projection matrix..
		GetARToolKitRTfromBAZARProjMat(g_matIntrinsic, m, matCameraRT4_4);
			
		cvTranspose(matCameraRT4_4, matCameraRT4_4);
		cvReleaseMat(&coordinateTrans);

		// Debug
		if (getDebugMode()) {
			// draw the coordinate system axes
			double w =video->width/2.0;
			double h =video->height/2.0;

			// 3D coordinates of an object
			double pts[4][4] = {
				{w,h,0, 1}, // 0,0,0,1
				{w*2,h,0, 1}, // w, 0
				{w,h*2,0, 1}, // 0, h
				{w,h,-w-h, 1} // 0, 0, -
			};

			CvMat ptsMat, projectedMat;
			cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
			cvInitMatHeader(&projectedMat, 3, 4, CV_64FC1, projected);	// 'projected' is a 3x4 double array defined elsewhere
		
			cvGEMM(m, &ptsMat, 1, 0, 0, &projectedMat, CV_GEMM_B_T );

			for (int i=0; i<4; i++) 
			{
				projected[0][i] /= projected[2][i];
				projected[1][i] /= projected[2][i];
			}

			// draw the projected lines
			cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
				cvPoint((int)projected[0][1], (int)projected[1][1]), CV_RGB(255,0,0), 2);
			cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
				cvPoint((int)projected[0][2], (int)projected[1][2]), CV_RGB(0,255,0), 2);
			cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
				cvPoint((int)projected[0][3], (int)projected[1][3]), CV_RGB(0,0,255), 2);
		}
	}
Example #18
bool CFeatureExtraction::DoPCA(CvMat * pMat, CvMat * pResultMat, int nSize, int nExpectedSize)
{
	printf("\nCFeatureExtraction::DoPCA in\n");
	int i;	

	printf("DoPCA: Sort our data sets in a vector each\n");	
	// Sort our data sets in a vector each
	CvMat ** pDataSet = new CvMat*[m_nWidth*m_nHeight];
	float * pData = pMat->data.fl;
	for (i=0;i<m_nWidth*m_nHeight;i++)
	{
		pDataSet[i] = (CvMat*) malloc(sizeof(CvMat));
		cvInitMatHeader(pDataSet[i], 1, nSize, CV_32FC1, &pData[i*nSize]);
	}
	
	printf("DoPCA: Calc covariance matrix\n");
	// Calc covariance matrix
	CvMat* pCovMat = cvCreateMat( nSize, nSize, CV_32F );
	CvMat* pMeanVec = cvCreateMat( 1, nSize, CV_32F );
	
	cvCalcCovarMatrix( (const void **)pDataSet, m_nWidth*m_nHeight, pCovMat, pMeanVec, CV_COVAR_SCALE | CV_COVAR_NORMAL );
	
	cvReleaseMat(&pMeanVec);
	
	printf("DoPCA: Do the SVD decomposition\n");
	// Do the SVD decomposition
	CvMat* pMatW = cvCreateMat( nSize, 1, CV_32F );
	CvMat* pMatV = cvCreateMat( nSize, nSize, CV_32F );
	CvMat* pMatU = cvCreateMat( nSize, nSize, CV_32F );
	
	cvSVD(pCovMat, pMatW, pMatU, pMatV, CV_SVD_MODIFY_A+CV_SVD_V_T);
	
	cvReleaseMat(&pCovMat);
	cvReleaseMat(&pMatW);
	cvReleaseMat(&pMatV);

	printf("DoPCA: Extract the requested number of dominant eigen vectors\n");
	// Extract the requested number of dominant eigen vectors
	CvMat* pEigenVecs = cvCreateMat( nSize, nExpectedSize, CV_32F );
	for (i=0;i<nSize;i++)
		memcpy(&pEigenVecs->data.fl[i*nExpectedSize], &pMatU->data.fl[i*nSize], nExpectedSize*sizeof(float));

	printf("DoPCA: Transform to the new basis\n");
	// Transform to the new basis	
	cvMatMul(pMat, pEigenVecs, pResultMat);
	cvReleaseMat(&pMatU);
	cvReleaseMat(&pEigenVecs);
	
	for (i = 0; i < m_nHeight * m_nWidth; i++)
		free(pDataSet[i]);	// allocated with malloc(), so free() rather than delete[]
	delete [] pDataSet;

	printf("CFeatureExtraction::DoPCA out\n");
	return true;
}
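The routine is classical PCA: form the (scaled) covariance matrix C of the samples, factor it as $C = U W V^\top$ by SVD, keep the first nExpectedSize columns of U as the dominant eigenvectors $U_k$, and project the data onto that basis, $Y = X\,U_k$, via the final cvMatMul.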
Example #19
#include <stdio.h>
#include <time.h>
#include <cxcore.h>

int main (int argc, char **argv)
{
	int i, j;
	int nrow = 3;
	int ncol = 3;
	CvMat *src, *dst, *mul;
	double det;
	CvRNG rng = cvRNG (time (NULL));      /* seed the random number generator */
	// (1) allocate the matrices
	src = cvCreateMat (nrow, ncol, CV_32FC1);
	dst = cvCreateMat (ncol, nrow, CV_32FC1);
	mul = cvCreateMat (nrow, nrow, CV_32FC1);
	// (2) fill src with random values
	printf ("src\n");
	for (i = 0; i < src->rows; i++)
	{
		for (j = 0; j < src->cols; j++)
		{
			cvmSet (src, i, j, cvRandReal (&rng));
			printf ("% lf\t", cvmGet (src, i, j));
		}
		printf ("\n");
	}
	// (3) compute the inverse of src and store it in dst
	det = cvInvert (src, dst, CV_SVD);
	// (4) print the value returned by cvInvert (with CV_SVD this is the
	//     inverse condition number of src rather than its determinant)
	printf ("det(src)=%lf\n", det);
	// (5) print dst
	printf ("dst\n");
	for (i = 0; i < dst->rows; i++)
	{
		for (j = 0; j < dst->cols; j++)
		{
			printf ("% lf\t", cvmGet (dst, i, j));
		}
		printf ("\n");
	}
	// (6) compute src * dst to verify (the product should be the identity)
	cvMatMul (src, dst, mul);
	printf ("mul\n");
	for (i = 0; i < mul->rows; i++)
	{
		for (j = 0; j < mul->cols; j++)
		{
			printf ("% lf\t", cvmGet (mul, i, j));
		}
		printf ("\n");
	}
	// (7) release the matrices
	cvReleaseMat (&src);
	cvReleaseMat (&dst);
	cvReleaseMat (&mul);
	return 0;
}
Example #20
double cvCGSolve( CvMat* A, CvMat* B, CvMat* X, CvTermCriteria term_crit )
{
	cvZero( X );
	CvMat* R = cvCloneMat( B );
	double delta = cvDotProduct( R, R );
	CvMat* D = cvCloneMat( R );
	double delta0 = delta;
	double bestres = 1.;
	CvMat* BX = cvCloneMat( X );
	CvMat* Q = cvCreateMat( X->rows, X->cols, CV_MAT_TYPE(X->type) );
	for ( int i = 0; i < term_crit.max_iter; i++ )
	{
		cvMatMul( A, D, Q );
		double alpha = delta / cvDotProduct( D, Q );
		cvScaleAdd( D, cvScalar(alpha), X, X );
		if ( (i + 1) % 50 == 0 )
		{
			cvMatMul( A, X, R );
			cvSub( B, R, R );
		} else {
			cvScaleAdd( Q, cvScalar(-alpha), R, R );
		}
		double deltaold = delta;
		delta = cvDotProduct( R, R );
		double beta = delta / deltaold;
		cvScaleAdd( D, cvScalar(beta), R, D );
		double res = delta / delta0;
		if ( res < bestres )
		{
			cvCopy( X, BX );
			bestres = res;
		}
		if ( delta < delta0 * term_crit.epsilon )
			break;
	}
	cvCopy( BX, X );
	cvReleaseMat( &R );
	cvReleaseMat( &D );
	cvReleaseMat( &BX );
	cvReleaseMat( &Q );
	return sqrt( bestres );
}
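A hedged usage sketch for the solver above (A should be symmetric positive definite for conjugate gradients to behave well; epsilon bounds the relative squared residual):

	CvTermCriteria tc = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 1000, 1e-12 );
	double relres = cvCGSolve( A, B, X, tc );
	printf( "relative residual: %g\n", relres );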
Example #21
// Rotate around the y axis
void yRotate(CvMat *vec, float angle, CvMat *res)
{
    angle = D2R(angle);
    float vals[] = { cos(angle), 0, -sin(angle), 0,
                     0, 1, 0, 0,
                     sin(angle), 0, cos(angle), 0,
                     0, 0, 0, 1};    
    CvMat mat;
    cvInitMatHeader( &mat, 4, 4, CV_32FC1, &vals );
    cvMatMul(&mat, vec, res);
}
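A hypothetical call, assuming D2R converts degrees to radians: rotating the homogeneous point (1, 0, 0, 1) by 90 degrees about the y axis should give approximately (0, 0, 1, 1) under this matrix convention:

    float v[4] = {1, 0, 0, 1}, r[4] = {0};
    CvMat vec = cvMat(4, 1, CV_32FC1, v);
    CvMat res = cvMat(4, 1, CV_32FC1, r);
    yRotate(&vec, 90.0f, &res);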
Example #22
static void augment_scene(CalibModel &model, IplImage *frame, IplImage *display)
{
  cvCopy(frame, display);

  if (!model.detector.object_is_detected) 
    return;

  CvMat *m = model.augm.GetProjectionMatrix(0);
  if (!m) return;

  double pts[4][4];
  double proj[4][4];
  CvMat ptsMat, projMat;
  cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
  cvInitMatHeader(&projMat, 3, 4, CV_64FC1, proj);
  for (int i=0; i<4; i++) {
    pts[0][i] = model.corners[i].x;
    pts[1][i] = model.corners[i].y;
    pts[2][i] = 0;
    pts[3][i] = 1;
  }
  cvMatMul(m, &ptsMat, &projMat);
  cvReleaseMat(&m);

  CvPoint projPts[4];
  for (int i=0;i<4; i++) {
    projPts[i].x = cvRound(proj[0][i]/proj[2][i]);
    projPts[i].y = cvRound(proj[1][i]/proj[2][i]);
  }

  CvMat *o2w = model.augm.GetObjectToWorld();
  float normal[3];
  for (int j=0;j<3;j++)
    normal[j] = cvGet2D(o2w, j, 2).val[0];
  cvReleaseMat(&o2w);

  // we want to relight a color present on the model image
  // with an irradiance coming from the irradiance map
  CvScalar color = cvGet2D(model.image, model.image->height/2, model.image->width/2);
  CvScalar irradiance = model.map.readMap(normal);

  // the camera has some gain and bias
  const float *g = model.map.getGain(0);
  const float *b = model.map.getBias(0);

  // relight the 3 RGB channels. The bias value expects 0 = black, 1 = white,
  // but the images are stored with a white value of 255, so conversion is required.
  for (int i=0; i<3; i++) {
    color.val[i] = 255.0*(g[i]*(color.val[i]/255.0)*irradiance.val[i] + b[i]);
  }

  // draw a filled polygon with the relighted color
  cvFillConvexPoly(display, projPts, 4, color);
}
Example #23
PerspectiveTransform * PerspectiveTransform::accumulateInverse(const Transform * T2) const
{
    PerspectiveTransform * m = new PerspectiveTransform;
    PerspectiveTransform T2_inv(*dynamic_cast<const PerspectiveTransform *>(T2)); //make a copy

    cvInvert(T2_inv, T2_inv);

    cvMatMul(T2_inv, *this, *m); //don't need casts as opencv will check dims agree.
    matMul(T2_inv.data.db, data.db, m->data.db, 3, 3, 3); //recomputes the same product with the hand-rolled matMul
    return m;
}
Example #24
/**
 *	OptimizePair:
 *		Input:
 *			cam1 - the first camera (already optimized)
 *          cam2 - the second camera with its intrinsic matrix initialized
 *			dR - an initial relative 3x3 camera rotation matrix
 *          set1 - SIFT features in the first image
 *          set2 - SIFT features in the second image
 *          aryInlier - the homography inliers
 *
 *		Output:
 *          cam2 - receives the optimized focal length and pose
 */
void OptimizeSingle( const CCamera& cam1, CCamera& cam2,
					double* dR,
					const CFeatureArray& set1,
					const CFeatureArray& set2, 
					const MatchArray& aryInlier )
{
	// Step 1. Initialize the camera pose of cam2

	// cam2's relative rotation to cam1
	CvMat matR = cvMat( 3, 3, CV_64F, dR );

	// cam1's absolute rotation
	double dRod1[3];
	CvMat matRod1 = cvMat( 3, 1, CV_64F, dRod1 );
	cam1.GetPose( dRod1 );

	double dRot1[9];
	CvMat matRot1 = cvMat( 3, 3, CV_64F, dRot1 );
	cvRodrigues2( &matRod1, &matRot1 );

	// compose R and Rot1 to get cam2's initial absolute rotation
	cvMatMul( &matR, &matRot1, &matR );

	double dRod2[3];
	CvMat matRod2 = cvMat( 3, 1, CV_64F, dRod2 );

	cvRodrigues2( &matR, &matRod2 );
	cam2.SetPose( dRod2 );

	// Step 2. Now we can perform bundle adjustment for cam2
	CBundleAdjust ba( 1, BA_ITER );
	ba.SetCamera( &cam2, 0 );

	// set points
	for( size_t i=0; i<aryInlier.size(); ++i )
	{
		const CFeature* ft1 = set1[ aryInlier[i].first ];
		const CFeature* ft2 = set2[ aryInlier[i].second ];

		double dir[3];
		cam1.GetRayDirectionWS( dir, cvPoint2D64f( ft1->x, ft1->y ) );
		
		// the 3d position
		CvPoint3D64f pt3 = cvPoint3D64f( dir[0]*radius, dir[1]*radius, dir[2]*radius );

		ba.SetPointProjection( pt3, 0, cvPoint2D64f( ft2->x, ft2->y ) );
	}

	ba.DoMotion();

	ba.GetAdjustedCamera( &cam2, 0 );
}
Example #25
float MixGaussian::GetProbability(CvMat * Sample)
{
	double P = 0.0;
	CvMat *diffMat = cvCloneMat(Sample);
	CvMat *diffMatT = cvCreateMat(1, _nDim, CV_64FC1);
	double expo;
	CvMat expoMat = cvMat(1, 1, CV_64FC1, &expo);

	for(int k = 0; k < _nMixture ; k++) {
		cvSub(Sample, _MeanMat[k], diffMat);
		cvTranspose(diffMat, diffMatT);
		cvMatMul(_CovMatI[k], diffMat, diffMat);
		cvMatMul(diffMatT, diffMat, &expoMat);
		expo *= (-0.5);
		P += (_Weight[k] * 1.0 / (pow(2 * CV_PI, 1.5) * sqrt(cvDet(_CovMat[k]))) * exp(expo));
	}

	cvReleaseMat(&diffMat);
	cvReleaseMat(&diffMatT);

	return P;
}
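The loop evaluates a Gaussian mixture density,

$$p(x) = \sum_k w_k\,\frac{1}{(2\pi)^{d/2}\,\lvert\Sigma_k\rvert^{1/2}}\exp\Bigl(-\tfrac12 (x-\mu_k)^\top \Sigma_k^{-1} (x-\mu_k)\Bigr),$$

but note that the pow(2 * CV_PI, 1.5) factor hard-wires d = 3; for a general _nDim the exponent should be _nDim / 2.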
Example #26
//!Apply to a matrix of points
void AffineTransform::applyToPoints(const CvMat * positions, CvMat * newPositions) const
{
    CvMat newPositions2d;
    // header for the first two rows of newPositions; the product lands in newPositions directly
    cvGetSubRect(newPositions, &newPositions2d, cvRect(0,0,newPositions->cols, 2));
    cvMatMul(*this, positions, &newPositions2d);

    for(int i=0; i<newPositions->cols; i++)
    {
        cvmSet(newPositions, 0, i, cvmGet(&newPositions2d, 0, i));
        cvmSet(newPositions, 1, i, cvmGet(&newPositions2d, 1, i));
        cvmSet(newPositions, 2, i, 1.0);
    }
}
Example #27
/*
  Performs a perspective transformation on a single point.  That is, for a
  point (x, y) and a 3 x 3 matrix T this function returns the point
  (u, v), where
  
  [x' y' w']^T = T * [x y 1]^T,
  
  and
  
  (u, v) = (x'/w', y'/w').

  Note that affine transforms are a subset of perspective transforms.
  
  @param pt a 2D point
  @param T a perspective transformation matrix
  
  @return Returns the point (u, v) as above.
*/
CvPoint2D64f persp_xform_pt( CvPoint2D64f pt, CvMat* T )
{
  CvMat XY, UV;
  double xy[3] = { pt.x, pt.y, 1.0 }, uv[3] = { 0 };
  CvPoint2D64f rslt;

  cvInitMatHeader( &XY, 3, 1, CV_64FC1, xy, CV_AUTOSTEP );
  cvInitMatHeader( &UV, 3, 1, CV_64FC1, uv, CV_AUTOSTEP );
  cvMatMul( T, &XY, &UV );
  rslt = cvPoint2D64f( uv[0] / uv[2], uv[1] / uv[2] );

  return rslt;
}
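A short usage sketch, assuming a 3x3 CV_64FC1 homography H is available:

  CvPoint2D64f p = cvPoint2D64f( 10.0, 20.0 );
  CvPoint2D64f q = persp_xform_pt( p, H );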
Example #28
/*
mask should be a symmetric filter and its size should be an odd number
*/
int STBuffer::TemporalConvolve(IplImage* dst,std::vector<double> mask)
{
	int	tfsz=(int)mask.size();
	assert(tfsz<=BufferSize);

	int i;
	if(BufferSize)
	{
		assert(dst->widthStep * dst->height == Buffer->step);
		assert(BufferSize>=3);
	}

	assert(tfsz%2); // the filter size must be odd

	int tstampres = FrameIndices.Middle(tfsz);

	// zero-pad the mask out to the buffer size
	if((int)mask.size()<BufferSize)
		for(i=(int)mask.size();i<BufferSize;i++)
			mask.push_back(0);

	std::vector<int> Sorted = FrameIndices.GetSortedIndices();

	CvMat *fil=cvCreateMat(1,BufferSize,DATATYPE);
	assert(BufferSize==(int)mask.size()); //filter is too big (it could be cut)
	IMG_ELEM_TYPE* filter=new IMG_ELEM_TYPE[BufferSize];
	
	for(i=0;i<BufferSize;i++)
		filter[Sorted[i]]=(IMG_ELEM_TYPE)mask[i];

	for(i=0;i<BufferSize;i++)
		cvmSet(fil,0,i, filter[i]);

	// convolve: the 1 x BufferSize filter row times the buffer, into dst reshaped to a single row
	CvMat *rdst, dsthdr;
	rdst = cvReshape(dst,&dsthdr,0,1);
	cvMatMul(fil,Buffer,rdst);

	delete[] filter;
	cvReleaseMat(&fil);

	return tstampres;
}
Example #29
bool LightCollector::genGrid(float corners[4][2], int nx, int ny)
{
	if (nx<1 || ny<1) return false;
	if (avg) { delete[] avg; avg = 0; }
	if (vertices) cvReleaseMat(&vertices);
	if (transformed) cvReleaseMat(&transformed);

	// generate vertices
	vertices = cvCreateMat(3, (nx+1)*(ny+1), CV_32FC1);
	transformed = cvCreateMat(3, vertices->cols, CV_32FC1);
	for (int y=0; y<(ny+1); ++y)
		for (int x=0; x<(nx+1); ++x) {
			CV_MAT_ELEM(*vertices, float, 0, y*(nx+1)+x) = float(x)/float(nx);
			CV_MAT_ELEM(*vertices, float, 1, y*(nx+1)+x) = float(y)/float(ny);
			CV_MAT_ELEM(*vertices, float, 2, y*(nx+1)+x) = 1;
		}

	// generate triangles
	nbTri = nx*ny*2;
	triangles = new int[nbTri*3];
	int *tri = triangles;
	for (int y=0; y<ny; ++y)
		for (int x=0; x<nx; ++x) {
			tri[0] = y*(nx+1)+x;
			tri[1] = y*(nx+1)+x+1;
			tri[2] = (y+1)*(nx+1)+x;
			tri+=3;
			tri[0] = y*(nx+1)+x+1;
			tri[1] = (y+1)*(nx+1)+x+1;
			tri[2] = (y+1)*(nx+1)+x;
			tri+=3;
		}

	homography H;
	if (!H.estimate(0, 0, corners[0][0], corners[0][1],
				1, 0, corners[1][0], corners[1][1],
				1, 1, corners[2][0], corners[2][1],
				0, 1, corners[3][0], corners[3][1]))
		return false;

	cvMatMul(&H, vertices, transformed);
	CvMat r1,r2,r3, d1, d2;
	cvGetRow(transformed, &r1, 0);
	cvGetRow(transformed, &r2, 1);
	cvGetRow(transformed, &r3, 2);
	cvGetRow(vertices, &d1, 0);
	cvGetRow(vertices, &d2, 1);
	cvDiv(&r1,&r3,&d1);
	cvDiv(&r2,&r3,&d2);
	return true;
}
Example #30
/* Compute the point pt after a perspective transform: given a point pt and a
perspective transform matrix T, compute the transformed point.
For a point (x,y) and matrix M, [x',y',w']^T = M * [x,y,1]^T (^T is transpose),
and the transformed point is (u,v) = (x'/w', y'/w').
Note: affine transforms are a special case of perspective transforms.
Parameters:
pt: a 2D point
T: the perspective transform matrix
Return value: the point pt after the perspective transform
*/
CvPoint2D64f persp_xform_pt(CvPoint2D64f pt, CvMat* T)
{
	CvMat XY, UV;		// XY: the 3x1 column vector for pt; UV: the 3x1 column vector for the transformed point
	double xy[3] = { pt.x, pt.y, 1.0 }, uv[3] = { 0 };		// the underlying data
	CvPoint2D64f rslt;		// the result

	// initialize the matrix headers
	cvInitMatHeader(&XY, 3, 1, CV_64FC1, xy, CV_AUTOSTEP);
	cvInitMatHeader(&UV, 3, 1, CV_64FC1, uv, CV_AUTOSTEP);
	cvMatMul(T, &XY, &UV);		// matrix product T*XY, stored in UV
	rslt = cvPoint2D64f(uv[0] / uv[2], uv[1] / uv[2]);		// the transformed point

	return rslt;
}