Example #1
File: Kalman.cpp Project: tzwenn/alvar
KalmanCore::KalmanCore(const KalmanCore &s) {
	n = s.n;
	x = cvCloneMat(s.x);
	F = cvCloneMat(s.F);
	x_pred = cvCloneMat(s.x_pred);
	F_trans = cvCloneMat(s.F_trans);
}
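Every example in this collection leans on the same ownership rule: cvCloneMat allocates a brand-new header and a brand-new data block, so the clone is fully independent of its source and needs its own cvReleaseMat. A minimal standalone sketch of that behavior (my illustration, not code from any of the projects below):

#include <opencv2/core/core_c.h>   // legacy C API header (older builds: <opencv/cxcore.h>)
#include <stdio.h>

int main()
{
    CvMat* a = cvCreateMat(2, 2, CV_64FC1);
    cvSet(a, cvScalar(1.0));           // fill the source with 1.0
    CvMat* b = cvCloneMat(a);          // deep copy: new header + new data

    cvmSet(a, 0, 0, 42.0);             // mutating the source...
    printf("%f\n", cvmGet(b, 0, 0));   // ...leaves the clone at 1.000000

    cvReleaseMat(&a);                  // each matrix owns its data,
    cvReleaseMat(&b);                  // so each needs its own release
    return 0;
}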
Example #2
File: cvext_io.cpp Project: liangfu/dnn
CVAPI(void) cvShowImageEx(const char * id, const CvArr * arr,
                          const int cm)
{
  CvMat * src, src_stub;
  double minval, maxval, maxdiff; CvPoint minloc, maxloc;
  int type = cvGetElemType(arr);
  CvMat * disp, * src_scaled;
  int i, j;
  
  if (!CV_IS_MAT(arr))
    src = cvGetMat(arr, &src_stub);
  else{
    src = (CvMat*)arr;
  }

  src = cvCloneMat(src);
  if ( (src->rows<60) || (src->cols<60) )
  {
    CvMat * orig = cvCloneMat(src);
    int scale=cvCeil(60./MIN(orig->rows, orig->cols));  // round up so small inputs are actually enlarged
    cvReleaseMat(&src);
    src = cvCreateMat(orig->rows*scale, orig->cols*scale,
                      CV_MAT_TYPE(orig->type));
    int m,n;
    if (CV_MAT_TYPE(src->type)==CV_64F){
      for (m=0;m<orig->rows;m++) {
      for (n=0;n<orig->cols;n++) {
      for (i=0;i<scale;i++) {
      for (j=0;j<scale;j++) {
        CV_MAT_ELEM(*src, double, m*scale+i, n*scale+j) = CV_MAT_ELEM(*orig, double, m, n);
      }
      }
      }
      }
    }else if (CV_MAT_TYPE(src->type)==CV_32F){
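The snippet above (truncated in this listing) hand-rolls nearest-neighbor upscaling with one loop nest per element type. As a hedged alternative, not part of the original file, cvResize from the imgproc C API does the same block replication for any element type:

CvMat* enlargeToMinSide(const CvMat* orig, int min_side)
{
    // integer zoom factor, rounded up so the shorter side really reaches min_side
    int scale = cvCeil((double)min_side / MIN(orig->rows, orig->cols));
    CvMat* dst = cvCreateMat(orig->rows * scale, orig->cols * scale,
                             CV_MAT_TYPE(orig->type));
    cvResize(orig, dst, CV_INTER_NN);  // nearest-neighbor = block replication
    return dst;                        // caller releases with cvReleaseMat
}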
Example #3
Point2D getOptFlow(IplImage* currentFrame,Point2D p,IplImage* preFrame)
{
	Point2D temp;
	double b[2];
	b[0]=0;b[1]=0;
	
	double M11=0,M12=0,M22=0;
	for(int i = -OPTICAL_FLOW_POINT_AREA/2; i < OPTICAL_FLOW_POINT_AREA/2; i++)
	{
		for (int j = -OPTICAL_FLOW_POINT_AREA/2;j < OPTICAL_FLOW_POINT_AREA/2;j++)
		{
			temp = partial(currentFrame,Point2D(p.row+i,p.col+j));
			M11 += temp.dcol*temp.dcol;
			M12 += temp.dcol*temp.drow;
			M22 += temp.drow*temp.drow;
			b[0] += temp.dcol*(pixval8U(currentFrame,p.row+i,p.col+j)-pixval8U(preFrame,p.row+i,p.col+j));
			b[1] += temp.drow*(pixval8U(currentFrame,p.row+i,p.col+j)-pixval8U(preFrame,p.row+i,p.col+j));
		}
	}
	double a[] = {M11,M12,M12,M22};
	CvMat M=cvMat(2, 2, CV_64FC1, a);
	CvMat *Mi = cvCloneMat(&M);
	cvInvert(&M,Mi,CV_SVD);
	temp.col=0;
	temp.row=0;
	b[0] = -b[0];
	b[1] = -b[1];
	CvMat Mb = cvMat(2,1,CV_64FC1,b);
	CvMat *Mr = cvCloneMat(&Mb);
	cvMatMul( Mi, &Mb, Mr);
	double vy = cvmGet(Mr,1,0);
	double vx = cvmGet(Mr,0,0);
	cvReleaseMat(&Mi);	// release the cloned matrices to avoid leaking them on every call
	cvReleaseMat(&Mr);

	return (Point2D(vy,vx));
}
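Inverting the 2x2 normal matrix and multiplying works, but it needs the two cloned temporaries Mi and Mr, which then have to be released. A sketch of the same solve step using cvSolve instead (my variant, reusing the a and b arrays built above):

double v[2];
CvMat A = cvMat(2, 2, CV_64FC1, a);   // the M11/M12/M22 normal matrix
CvMat B = cvMat(2, 1, CV_64FC1, b);   // the (negated) mismatch vector
CvMat V = cvMat(2, 1, CV_64FC1, v);
cvSolve(&A, &B, &V, CV_SVD);          // V = A^-1 * B without forming the inverse
// v[0] = vx, v[1] = vy; every header sits on the stack, nothing to release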
Example #4
File: Kalman.cpp Project: tzwenn/alvar
KalmanSensorEkf::KalmanSensorEkf(const KalmanSensorEkf &k) : KalmanSensor(k) {
	delta   = cvCloneMat(k.delta);
	x_plus  = cvCloneMat(k.x_plus);
	x_minus = cvCloneMat(k.x_minus);
	z_tmp1  = cvCloneMat(k.z_tmp1);
	z_tmp2  = cvCloneMat(k.z_tmp2);
}
Example #5
File: UKF.cpp Project: refopen/Rsimulator
int CUKF::Augment()
{
	int i, /*ii,*/ j, k, l;

	int newzno = zn.size();
	if(newzno == 0) return -1;

	for(l=1; l<newzno+1; l++)
	{
		printf("zn index : %d\n", zn[l-1].idx);
		int rows = XX->rows;
		int cols = XX->cols;
		int dim = XX->rows;
		CvMat *temp1 = cvCloneMat(XX);
		cvReleaseMat(&XX);
		XX = cvCreateMat(dim+2, 1, CV_64FC1);
		for(i=0; i<rows; i++)	XX->data.db[i] = temp1->data.db[i];
// 		for(i=rows, ii=l-1; i<XX->rows; i+=2, ii++)
// 		{
			XX->data.db[rows] = zn[l-1].x;
			XX->data.db[rows+1] = zn[l-1].y;
//		}

		rows = PX->rows;
		cols = PX->cols;
		CvMat *temp2= cvCloneMat(PX);
		cvReleaseMat(&PX);
		PX = cvCreateMat(rows+2, cols+2, CV_64FC1);

		cvZero(PX);
		for(i=0; i<rows; i++)	for(j=0; j<cols; j++)
			PX->data.db[i*PX->cols+j] = temp2->data.db[i*cols+j];

		for(i=rows, k=0; i<PX->rows; i++, k++)	for(j=cols; j<PX->cols; j++)
		{
			if(i==j)
			{
				if(k%2==0)	PX->data.db[i*PX->cols+j] = R->data.db[0];
				else		PX->data.db[i*PX->cols+j] = R->data.db[3];
			}
		}

		cvReleaseMat(&temp1);
		cvReleaseMat(&temp2);

//  	 	PrintCvMat(XX, "XX");
//  		PrintCvMat(PX, "PX");

		UnscentedTransform(AUGMENT_MODEL);
	}

	return 0;
}
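The grow-by-two pattern (clone the old vector, release it, allocate a larger one, copy back) recurs in this class; assuming the state vector stays an n x 1 CV_64FC1 matrix, a small helper along these lines would keep Augment shorter:

// Returns a copy of `v` grown by two rows holding `x` and `y`.
// The caller owns the result and still releases `v` itself.
static CvMat* appendXY(const CvMat* v, double x, double y)
{
    CvMat* grown = cvCreateMat(v->rows + 2, 1, CV_64FC1);
    for (int i = 0; i < v->rows; i++)
        grown->data.db[i] = v->data.db[i];   // copy the old entries
    grown->data.db[v->rows]     = x;
    grown->data.db[v->rows + 1] = y;
    return grown;
}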
Example #6
/**
 * Calculate the pose of the camera. Since no translation is applied,
 * only the rotation is calculated.
 *
 * [R|T]
 */
CvMat* MultipleViewGeomOld::calculateRotationMatrix(float angle) {

    // | R T |
    // | 0 1 |
    // 1 	 0 	    0 	0
    // 0 cos() -sin()   0
    // 0 sin()  cos()   0
    // 0     0      0   1

    float sinTeta = sin(angle);
    float cosTeta = cos(angle);

    float a[] = { 1, 0, 0, 0, 0, cosTeta, -sinTeta, 0, 0, sinTeta, cosTeta, 0,
                  0, 0, 0, 1
                };

    //CvMat rtMat = cvMat(4, 4, CV_32FC1, a);
    //rtMat = *cvCloneMat(&rtMat);

    CvMat rtHeader = cvMat(4, 4, CV_32F, a);
    CvMat* rtMat = cvCloneMat(&rtHeader);	// deep copy so the returned matrix owns its data

    LOG4CPLUS_DEBUG(myLogger,"Rotation R|T matrix for angle: " << angle << endl << printCvMat(rtMat));

    return rtMat;

}
Example #7
CvMat *normalize(const CvMat* vector) {
    CvMat *norm = cvCloneMat(vector);
    double normVal = cvNorm(vector);
    cvScale(vector, norm, 1 / normVal);
    
    return norm;
}
Example #8
//Input image, Edges, outputNameFile
int homography_transformation(CvMat* src, char* out_filename, CvPoint2D32f* srcQuad){
	if(src == NULL || out_filename == NULL || srcQuad == NULL)
		return -1;
	
	CvPoint2D32f dstQuad[4]; 				//Destination vertices
   	CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);  	//transformation matrix
    	CvMat* dst = cvCloneMat(src);   			//clone image  
    	int p[3]={CV_IMWRITE_JPEG_QUALITY,JPEG_QUALITY,0};	//FORMAT, QUALITY
    
    	dstQuad[0].x = 0;       	//dst Top left
    	dstQuad[0].y = 0;
    	dstQuad[1].x = src->cols;  	//dst Top right
    	dstQuad[1].y = 0;
    	dstQuad[2].x = 0;     		//dst Bottom left
    	dstQuad[2].y = src->rows;      
    	dstQuad[3].x = src->cols;  	//dst Bot right
    	dstQuad[3].y = src->rows;

    	// get transformation matrix that you can use to calculate 
    	cvGetPerspectiveTransform(srcQuad,dstQuad, warp_matrix);
    	// perspective transformation. Parameters: source, destination, warp_matrix, 
	//type of interpolation: (CV_INTER_LINEAR, CV_INTER_AREA, CV_INTER_CUBIC, CV_INTER_LANCZOS4)
  	///Set all scalar with the same value. 0 means the black color of border
	cvWarpPerspective(src, dst, warp_matrix, CV_INTER_LINEAR, cvScalarAll(0)); // 1 = CV_INTER_LINEAR

	cvSaveImage(out_filename,dst, p);
	//close and destroy all stuff

	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&dst);
	return 0;
}
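A hedged usage sketch (the file names and corner coordinates are made up): srcQuad must list the corners in the same order as dstQuad above, i.e. top-left, top-right, bottom-left, bottom-right.

CvPoint2D32f quad[4] = { {12, 8}, {300, 15}, {20, 200}, {310, 210} };
CvMat* img = cvLoadImageM("page.jpg", CV_LOAD_IMAGE_COLOR);
if (!img || homography_transformation(img, "page_rectified.jpg", quad) != 0)
    fprintf(stderr, "warp failed\n");
if (img) cvReleaseMat(&img);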
Example #9
bool CameraParameters::loadParameters(const char* filename){

    CvFileStorage* fstorage = 0;
    fstorage =  cvOpenFileStorage(filename, NULL,  CV_STORAGE_READ);

    if(!fstorage)
        return false;

    CvMat* camera = (CvMat*) cvReadByName(fstorage, 0, CAMERA_MATRIX_TAG);
    CvMat* distortion = (CvMat*) cvReadByName(fstorage, 0, DISTORTION_COEF_TAG);
    if(!camera || !distortion){   // guard: cvCloneMat would crash on a missing node
        cvReleaseFileStorage(&fstorage);
        return false;
    }
    this->m_mat_camera_matrix = cvCloneMat(camera);
    this->m_mat_distortion_coeff = cvCloneMat(distortion);

    cvReleaseFileStorage(&fstorage);

    return true;

}
Example #10
File: t1.cpp Project: JoyFYan/OPENCV
int main(int argc, char* argv[])
{

	// Creating a matrix, method 1: create it directly
	CvMat* pmat1;
	pmat1 = cvCreateMat(8, 9, CV_32FC1);

	// Method 2: create the matrix header first, then allocate the data block
	CvMat* pmat2;
	pmat2 = cvCreateMatHeader(4, 5, CV_8UC1);
	cvCreateData(pmat2);
	
	// Method 3: build a matrix header on top of existing data
	float data[4] = { 3, 4, 6, 0 };
	CvMat pmat3;
	cvInitMatHeader(&pmat3, 2, 2, CV_32FC1, data);

	// Method 4: clone an existing matrix
	CvMat* pmat4;
	pmat4 = cvCloneMat(pmat2);

	// Access the matrix attributes
	test(pmat2);


	// Release the matrix memory (pmat3 needs no release: its header and data live on the stack)
	cvReleaseMat(&pmat1);
	cvReleaseMat(&pmat2);
	cvReleaseMat(&pmat4);
	
	return 0;
}
Example #11
int main(int argc, char * argv[])
{
  int i,j;
  const char * imglist[] = {
    "../data/palm.train/positive.txt",
    "../data/palm.train/negative.txt",""
  };
  char filename[2][1<<13][256];
  int count[2]={0,0};
  char line[1024];
  FILE * fp;
  IplImage * tmp;
  CvMat *** samples = new CvMat **[2];
  CvMat ** samples_stub = new CvMat *[2];

  // load image info
  for (i=0;i<2;i++){
  fp = fopen(imglist[i],"r");
  for (j=0;;j++){
    fgets(line,1024,fp);
    if (line[0]=='-'){break;}
    // strcpy(filename[i][j],line);
    sscanf(line,"%s",filename[i][j]);
  }
  fclose(fp);
  count[i]=j-1;
  }

  // load raw image
  for (i=0;i<2;i++){
  samples[i] = new CvMat * [count[i]];
  samples_stub[i] = new CvMat[count[i]];
  for (j=0;j<count[i];j++){
    if (!icvFileExist(filename[i][j])){
      LOGE("file %s not exist",filename[i][j]);
    }
    tmp = cvLoadImage(filename[i][j],0);
    samples[i][j] = cvCloneMat(cvGetMat(tmp,&samples_stub[i][j]));  // the stub header points into tmp's pixels; the clone owns its own copy
    cvReleaseImage(&tmp);
  }
  }
  
  // fprintf(stderr, "INFO: %d positive samples!\n", i);

  CvStagedDetectorHOG detector;
  detector.cascadetrain(samples[0],count[0],samples[1],count[1]);

  // release raw images
  for (i=0;i<2;i++){
  for (j=0;j<count[i];j++){
    cvReleaseMat(&samples[i][j]);
  }
  delete [] samples[i];
  }
  delete [] samples;
  
  return 0;
}
Example #12
bool MixGaussian::AddGaussian(
		CvMat * MeanMat,
		CvMat * CovMat,
		float Weight)
{
	if(_nMixture == MAX_NUM_GAUSSIAN)
		return false;

	_MeanMat[_nMixture] = cvCloneMat(MeanMat);
	_CovMat [_nMixture] = cvCloneMat(CovMat);
	_CovMatI[_nMixture] = cvCloneMat(CovMat);	// clone only to get a same-sized buffer...
	cvInvert(CovMat, _CovMatI[_nMixture]);		// ...which cvInvert overwrites with CovMat^-1
	_Weight [_nMixture] = Weight;

	_nMixture++;

	return true;
}
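Because AddGaussian clones everything it receives, the caller may pass throwaway stack headers; a usage sketch assuming a MixGaussian instance `mix` configured for two dimensions:

double mu[2]  = { 0.0, 0.0 };
double cov[4] = { 1.0, 0.0,
                  0.0, 1.0 };
CvMat Mean = cvMat(2, 1, CV_64FC1, mu);
CvMat Cov  = cvMat(2, 2, CV_64FC1, cov);
mix.AddGaussian(&Mean, &Cov, 1.0f);   // safe: the stored clones outlive these stack mats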
Example #13
void AdaBoost_train_cascade(vector<vector<WEAKCLASSIFIER>> &sc, CvMat *features_matrix, vector<int> &vecLabel, vector<WEAKCLASSIFIER> &wc_pool, const char *sPath)
{
	int num_pos = 0;
	while (vecLabel[num_pos] != -1)
		num_pos++;
	int num_neg = vecLabel.size() - num_pos;
	CvMat *features_matrix_pos = cvCreateMat(num_pos, features_matrix->cols, CV_32FC1);
	for (int kx = 0;kx < num_pos; kx++){
		for (int j = 0;j < features_matrix_pos->cols;j++)
			features_matrix_pos->data.fl[kx*features_matrix_pos->cols + j] = features_matrix->data.fl[kx*features_matrix->cols+j];
	}
	CvMat *features_matrix_neghard = cvCreateMat(num_neg, features_matrix->cols, CV_32FC1);
	for (int kx = num_pos;kx < num_pos + num_neg; kx++){
		for (int j = 0;j < features_matrix_neghard->cols;j++)
			features_matrix_neghard->data.fl[(kx-num_pos)*features_matrix_neghard->cols + j] = features_matrix->data.fl[kx*features_matrix->cols+j];
	}
	vector<int> vecLabel_parts;

	int itr = 0;
	do{
		printf("***********itr=%d************\n", itr);
		CvMat *features_matrix_neg = cvCreateMat(MIN(num_pos, features_matrix_neghard->rows), features_matrix->cols, CV_32FC1);
		for (int kx = 0;kx < MIN(num_pos, features_matrix_neghard->rows); kx++)
			for (int j = 0;j < features_matrix_neg->cols;j++)
				features_matrix_neg->data.fl[kx*features_matrix_neg->cols+j] = features_matrix_neghard->data.fl[kx*features_matrix_neghard->cols+j];

		CvMat *features_matrix_trainparts  = MergeMatrixByRows(features_matrix_pos, features_matrix_neg, 0);
		vector<WEAKCLASSIFIER> nc; vecLabel_parts.clear();
		for (int kx = 0;kx < features_matrix_pos->rows;kx++)
			vecLabel_parts.push_back(1);
		for (int kx = 0;kx< features_matrix_neg->rows;kx++)
			vecLabel_parts.push_back(-1);
		float node_threshold = 0;
		AdaBoost_train_node(nc, features_matrix_trainparts, vecLabel_parts, wc_pool);
		sc.push_back(nc);
		WriteClassifier(sc, sPath);
		
		cvReleaseMat(&features_matrix_trainparts);
		cvReleaseMat(&features_matrix_neg);
		CvMat *features_matrix_neghard_update = UpdateNegSamples(sc, features_matrix_neghard);
		if (features_matrix_neghard_update != NULL){
			cvReleaseMat(&features_matrix_neghard);
			features_matrix_neghard = cvCloneMat(features_matrix_neghard_update); 
			cvReleaseMat(&features_matrix_neghard_update);
		}
		else{
			break;
		}

		itr++;
	}
	while(itr < 20);

	cvReleaseMat(&features_matrix_neghard);
	cvReleaseMat(&features_matrix_pos);
}
Example #14
LightCollector::LightCollector(const LightCollector &lc)
{
	if (lc.vertices) vertices = cvCloneMat(lc.vertices);
	else vertices=0;
	if (lc.transformed) transformed = cvCloneMat(lc.transformed);
	else transformed=0;
	nbTri = lc.nbTri;
	nx = lc.nx;
	ny = lc.ny;
	if (lc.triangles) {
		triangles = new int[nbTri*3];
		memcpy(triangles, lc.triangles, sizeof(int)*nbTri*3);
	} else triangles=0;
	avgChannels = lc.avgChannels;
	avg=0;
	if (lc.worldRT) cvIncRefData(lc.worldRT);
	worldRT = lc.worldRT;
	//assert(lc.avg==0);
}
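Note the asymmetry in this copy constructor: vertices and transformed are deep-copied with cvCloneMat, while worldRT is shared by bumping the reference count on its data. A minimal sketch of the two regimes, given an existing CvMat* src (my illustration, not project code):

// deep copy: new header + new data, fully independent of src
CvMat* deep = cvCloneMat(src);
cvReleaseMat(&deep);     // frees the copy; src is untouched

// shared copy: a second header over the same data block
CvMat hdr = *src;        // struct copy of the header only
cvIncRefData(&hdr);      // count the extra user of the data
/* ... use hdr ... */
cvDecRefData(&hdr);      // drop the extra reference; the block is freed when the count reaches zero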
Example #15
File: cvcgsolve.cpp Project: caomw/l1cs
double cvCGSolve( CvMatOps AOps, void* userdata, CvMat* B, CvMat* X, CvTermCriteria term_crit )
{
	cvZero( X );
	CvMat* R = cvCloneMat( B );
	double delta = cvDotProduct( R, R );
	CvMat* D = cvCloneMat( R );
	double delta0 = delta;
	double bestres = 1.;
	CvMat* BX = cvCloneMat( X );
	CvMat* Q = cvCreateMat( X->rows, X->cols, CV_MAT_TYPE(X->type) );
	for ( int i = 0; i < term_crit.max_iter; i++ )
	{
		AOps( D, Q, userdata );
		double alpha = delta / cvDotProduct( D, Q );
		cvScaleAdd( D, cvScalar(alpha), X, X );
		if ( (i + 1) % 50 == 0 )
		{
			AOps( X, R, userdata );
			cvSub( B, R, R );
		} else {
			cvScaleAdd( Q, cvScalar(-alpha), R, R );
		}
		double deltaold = delta;
		delta = cvDotProduct( R, R );
		double beta = delta / deltaold;
		cvScaleAdd( D, cvScalar(beta), R, D );
		double res = delta / delta0;
		if ( res < bestres )
		{
			cvCopy( X, BX );
			bestres = res;
		}
		if ( delta < delta0 * term_crit.epsilon )
			break;
	}
	cvCopy( BX, X );
	cvReleaseMat( &R );
	cvReleaseMat( &D );
	cvReleaseMat( &BX );
	cvReleaseMat( &Q );
	return sqrt( bestres );
}
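A hedged usage sketch: judging from the call AOps( D, Q, userdata ) above, CvMatOps is a callback of the form void (*)(CvMat* src, CvMat* dst, void* userdata) that applies the (symmetric positive-definite) operator. The operator, matrices, and tolerances below are made up for illustration:

// y = A*x with a dense A smuggled in through userdata
static void denseAOps( CvMat* x, CvMat* y, void* userdata )
{
    cvMatMul( (CvMat*)userdata, x, y );
}

// A: n x n CvMat*, b and x: n x 1 CvMat*, all allocated by the caller (assumed)
CvTermCriteria tc = cvTermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 1e-10 );
double res = cvCGSolve( denseAOps, A, b, x, tc );   // returns sqrt of the best residual ratio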
Example #16
/*
 * Constructor
 *
 * (u0,v0) = image centre
 * alfaU = f*ku = pixels per unit of measurement, e.g. 1 pixel/micrometre
 * alfaV = f*kv = likewise for the other image axis
 */
MultipleViewGeomOld::MultipleViewGeomOld(int u0, int v0, float alfaU, float alfaV) :
    myLogger(log4cplus::Logger::getInstance("multipleViewGeom")) {

    // Orthographic projection
    float pContents[] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1 };
    CvMat pHeader = cvMat(3, 4, CV_32F, pContents);
    pMat = cvCloneMat(&pHeader);	// deep copy so pMat owns its data

    calculateCameraMatrix(u0, v0, alfaU, alfaV);

}
Example #17
CvMat* CamAugmentation::GetCamEyeMatrix( int c ){

  // If this camera index is among the mounted (calibrated) cameras:
  if( s_optimal.v_camera_c.size() > (unsigned)c ){

    if(s_optimal.v_camera_r_t[c])
      return cvCloneMat(s_optimal.v_camera_r_t[c]);
    else
      return NULL;
  } else
    return NULL;
}
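Since the getter hands back a fresh clone (or NULL), the caller owns the result; a short usage sketch with a hypothetical CamAugmentation instance:

CvMat* rt = augmentation.GetCamEyeMatrix(0);   // augmentation: a CamAugmentation (assumed)
if (rt != NULL) {
    /* ... use the camera [R|t] matrix ... */
    cvReleaseMat(&rt);                         // the clone is ours to free
}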
Example #18
File: UKF.cpp Project: refopen/Rsimulator
int CUKF::Prediction()
{
	int rows = XX->rows;
	int cols = XX->cols;
	int i, j;

	//temp1 = cvCreateMat(rows, cols, CV_64FC1);
	//cvCopy(XX, temp1);
	//cvReleaseMat(&XX);
	CvMat *temp1 = cvCloneMat(XX);
	cvReleaseMat(&XX);	// without this the old state vector leaks on every call
	XX = cvCreateMat(rows+2, cols, CV_64FC1);

	for(i=0; i<rows; i++)	XX->data.db[i] = temp1->data.db[i];
	XX->data.db[rows+2-2] = Vn;
	XX->data.db[rows+2-1] = Gn;

	rows = PX->rows;
	cols = PX->cols;

	CvMat *temp2 = cvCloneMat(PX);
	cvReleaseMat(&PX);
	PX = cvCreateMat(rows+2, cols+2, CV_64FC1);
	cvZero(PX);
	for(i=0; i<rows; i++)	
		for(j=0; j<cols; j++)	
			PX->data.db[i*PX->cols+j] = temp2->data.db[i*cols+j];
	i = PX->rows-2;	PX->data.db[i*PX->cols+i] = Q->data.db[0];
	i = PX->rows-1;	PX->data.db[i*PX->cols+i] = Q->data.db[3];

	cvReleaseMat(&temp1);
 	cvReleaseMat(&temp2);

// 	PrintCvMat(XX, "Prediction : XX");
// 	PrintCvMat(PX, "Prediction : PX");

	UnscentedTransform(MOTION_MODEL);

	return 0;
}
Example #19
File: Kalman.cpp Project: tzwenn/alvar
KalmanSensorCore::KalmanSensorCore(const KalmanSensorCore &k) {
	m = k.m;
	n = k.n;
	z = cvCloneMat(k.z);
	H = cvCloneMat(k.H);
	H_trans = cvCloneMat(k.H_trans);
	K = cvCloneMat(k.K);
	z_pred = cvCloneMat(k.z_pred);
	z_residual = cvCloneMat(k.z_residual);
	x_gain = cvCloneMat(k.x_gain);
}
Example #20
void LightCollector::copy(const LightCollector &lc)
{
	if (lc.vertices) vertices = cvCloneMat(lc.vertices);
	else vertices=0;
	if (lc.transformed) transformed = cvCloneMat(lc.transformed);
	else transformed=0;
	nbTri = lc.nbTri;
	nx = lc.nx;
	ny = lc.ny;
	avgChannels = lc.avgChannels;
	if (lc.triangles) {
		triangles = new int[nbTri*3];
		memcpy(triangles, lc.triangles, sizeof(int)*nbTri*3);
	} else 
		triangles=0;
	if (lc.avg) {
		avg = new float[avgChannels*nbTri];
		memcpy(avg, lc.avg, sizeof(float)*avgChannels*nbTri);
	} else 
		avg=0;
	if (lc.worldRT) worldRT=cvCloneMat(lc.worldRT);
	else worldRT = 0;
}
Example #21
/**
 * Calculate: K P R|T
 */
CvMat* MultipleViewGeomOld::calculateProjectionMatrix(CvMat *rtMat) {


    // 3 rows, 4 columns
    CvMat* kpMat = cvCreateMat(3, 4, CV_32FC1);
    cvMatMul(kMat,pMat,kpMat);

    CvMat* projMat = cvCreateMat(3, 4, CV_32FC1);
    cvMatMul(kpMat,rtMat,projMat);
    cvReleaseMat(&kpMat);	// the intermediate K*P is no longer needed

    LOG4CPLUS_DEBUG(myLogger,"Projection K P R|T matrix" << endl << printCvMat(projMat));

    return projMat;
}
Example #22
/**
 *
 * Constructor for test purposes
 *
 */
MultipleViewGeomOld::MultipleViewGeomOld() :
    myLogger(log4cplus::Logger::getInstance("multipleViewGeom")) {

    // Orthographic projection
    float pContents[] = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1 };
    //pMat = cvMat(3, 4, CV_32FC1, pContents);

    CvMat pHeader = cvMat(3, 4, CV_32F, pContents);
    pMat = cvCloneMat(&pHeader);	// deep copy so pMat owns its data


    //float projMatContents[3][4];
    //projMat = cvMat(3, 4, CV_32FC1, projMatContents);

}
Example #23
/**
 * Calculate K matrix
 *
 *  ------------> u
 * |
 * |      ^ Y
 * |      |
 * |      |
 * |       -----> X
 * |
 * |
 * V v
 *
 */
void MultipleViewGeomOld::calculateCameraMatrix(int u0, int v0, float alfaU,
        float alfaV) {

    //     au  0 u0
    // k =  0 av v0
    //      0  0  1

    float a[] = { alfaU, 0, u0, 0, alfaV, v0, 0, 0, 1 };

    CvMat kHeader = cvMat(3, 3, CV_32F, a);
    kMat = cvCloneMat(&kHeader);	// deep copy so kMat owns its data

    LOG4CPLUS_DEBUG(myLogger,"Camera K matrix" << endl << printCvMat(kMat));

}
Example #24
/**
 * Projects a point in real world coordinates against the image
 * Output: image coordinate in pixels
 */
CvPoint MultipleViewGeomOld::getProjectionOf(float angle, CvPoint3D32f point) {

    //
    map<float, CvMat*>::iterator iter = projMatList.find(angle);

    CvMat *projMat = NULL;	// filled from the cache or computed below

    if (iter == projMatList.end()) {
        // projection matrix for this angle does not exist yet

        // Calculate rotation matrix
        CvMat* rtMat = calculateRotationMatrix(angle);

        // Calculate projection matrix; ownership passes to the cache
        projMat = calculateProjectionMatrix(rtMat);
        cvReleaseMat(&rtMat);	// the rotation matrix is no longer needed

        projMatList.insert(pair<float, CvMat*> (angle, projMat));

    } else {
        // otherwise it exists
        projMat = iter->second;
    }

    LOG4CPLUS_DEBUG(myLogger,"Projection matrix for angle: " << radToDegree(angle) << " and points: " << point << endl <<  printCvMat(projMat));

    //  [u v 1] = proj * [X Y Z 1]

    float uvContents[3];
    CvMat uvMat = cvMat(3, 1, CV_32F, uvContents);	// header over stack data, nothing to release

    float xyzContents[] = { point.x, point.y, point.z, 1 };
    CvMat xyzMat = cvMat(4, 1, CV_32F, xyzContents);

    cvMatMul(projMat, &xyzMat, &uvMat);

    LOG4CPLUS_DEBUG(myLogger, "Result [u v 1] = proj * [X Y Z 1]: " << endl << printCvMat(&uvMat));

    // the bottom rows of P and [R|T] keep w == 1 here, so no perspective divide is needed
    return cvPoint(cvRound(cvmGet(&uvMat, 0, 0)), cvRound(cvmGet(&uvMat, 1, 0)));

}
Example #25
void CEnrollDlg::OnBnClickedShoot()
{
	// TODO: Add your control notification handler code here
	if (!m_bFoundFace) return;
	//CFaceAlign* &al = g_faceMngr->align;
#ifndef COMPILE_ALIGN_COORD

	CvRect rc = g_faceMngr->align->m_rcCurFace;
	CvSize fsz = g_webcam.GetFrameSize();

	// Make sure the face is not too far off-center and is a reasonable size for enrollment
	if (rc.x < fsz.width * 5 / 100				||
		rc.x + rc.width > fsz.width * 98 / 100	||
		rc.y < fsz.height * 5 / 100				||
		rc.y + g_faceMngr->align->m_dis*16/6 > fsz.height*95/100||
		rc.width < fsz.width / 4	||
		rc.width > fsz.width * .7	||
		rc.height < fsz.height *.4	||
		rc.height > fsz.height*9/10)
	{
		::AfxMessageBox("Please adjust so that the face is centered in the frame, about half a meter away.");
		return;
	}

#endif

	CWnd *pBtn = GetDlgItem(IDC_SHOOT);
	RECT rc1;
	pBtn->GetClientRect(&rc1);
	pBtn->SetWindowText("Shoot next");
	pBtn->SetWindowPos(NULL, 0,0, 70, rc1.bottom-rc1.top, SWP_NOMOVE | SWP_NOZORDER);
	GetDlgItem(IDC_DEL_SHOOT)->ShowWindow(SW_SHOW);
	GetDlgItem(IDC_ENROLL_PIC)->ShowWindow(SW_HIDE);
	GetDlgItem(IDC_IMPORT_MODELS)->ShowWindow(SW_HIDE);

	CString title;
	title.Format("pic %d", m_nShootNum++);
	cvNamedWindow(title, 0);
	cvResizeWindow(title, showSz.width, showSz.height);
	cvShowImage(title, faceImg8);

	IplImage *pic1 = cvCloneImage(m_frame);
	m_lstPic.AddTail(pic1);
	CvMat *face = cvCloneMat(faceImg8);
	m_lstFace.AddTail(face);
}
Example #26
void mcvIplImageToCvMat(IplImage* im, CvMat **clrImage, CvMat** channelImage)
{
    // convert to mat and get first channel
    CvMat temp;
    cvGetMat(im, &temp);
    *clrImage = cvCloneMat(&temp);
    // convert to single channel
    CvMat* tchannelImage = cvCreateMat(im->height, im->width, INT_MAT_TYPE);
    cvSplit(*clrImage, tchannelImage, NULL, NULL, NULL);
    // convert to float
    *channelImage = cvCreateMat(im->height, im->width, FLOAT_MAT_TYPE);
    cvConvertScale(tchannelImage, *channelImage, 1./255);
    // destroy
    cvReleaseMat(&tchannelImage);
    //cvReleaseImage(&im);

}
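A hedged usage sketch: both output matrices are allocated inside the function, so the caller releases them (frame stands for an IplImage* obtained elsewhere):

CvMat *color = NULL, *channel = NULL;
mcvIplImageToCvMat(frame, &color, &channel);
/* color keeps all channels; channel holds the first channel scaled to [0,1] */
cvReleaseMat(&color);
cvReleaseMat(&channel);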
Example #27
File: main.cpp Project: 3deggi/roadtrac
void drawcg()
{
	glMatrixMode (GL_PROJECTION);
	glLoadIdentity();
	glOrtho(0, IMG_WIDTH, 0, IMG_HEIGHT, -1.0, 1.0);

	list<blobseq*>::iterator lblob = m_tracker.lblobseqref.begin();
	for (;lblob!=m_tracker.lblobseqref.end();++lblob)
	{
		if ((*lblob)->overlap==0)
		{
				continue;
		}
						
		glEnable (GL_DEPTH_TEST);

		CvMat* hmt = cvCloneMat((*lblob)->homography);
		cvmSet(hmt,2, 0, cvmGet(hmt,2,0)/IMG_SCALE);
		cvmSet(hmt,2, 1, cvmGet(hmt,2,1)/IMG_SCALE);
		cvmSet(hmt,2, 2, cvmGet(hmt,2,2)/IMG_SCALE);
			
		setviewfromhomography(hmt);
		
		glMatrixMode(GL_PROJECTION);
		glLoadMatrixd(projection);
		glMultMatrixd(view);

		glPushMatrix();			
		//draw labels
		for (unsigned i=0;i<(*lblob)->labels.size();i++)
		{
			glPushMatrix();
			drawCircle((float)((*lblob)->labels[i].x/IMG_SCALE), (float)((*lblob)->labels[i].y/IMG_SCALE), 3.0, 20);
			drawText((float)((*lblob)->labels[i].x/IMG_SCALE), (float)((*lblob)->labels[i].y/IMG_SCALE-10), 0, (*lblob)->labels[i].name, 7, !m_rotated, m_rotated, 1, 1, 0.0);
			glPopMatrix();	
		}
		drawText((float)((*lblob)->bound.x+(*lblob)->bound.width/2),(float)((*lblob)->bound.y+(*lblob)->bound.height/2), 0.0, (*lblob)->name, 20.0, true);

		glPopMatrix();
		glDisable(GL_DEPTH_TEST);
		cvReleaseMat(&hmt);
		(*lblob)->overlap=0;
	}
	
}
Example #28
/*
 * \param Intrinsic  : 3 by 3 camera intrinsic matrix  
 * \param bazProjMat : 3 by 4 projection matrix  = K[RT] obtained by calling "augment.GetProjectionMatrix(0)" function
 * \param arRT       : 4 by 4 [R t] matrix for rendering
*/
	void BazARTracker::GetARToolKitRTfromBAZARProjMat(CvMat *Intrinsic, CvMat *bazProjMat, CvMat *arRT)
	{
		int i, j;
		CvMat *temps = cvCreateMat(3, 4, CV_64F);
		
		// invert the intrinsic camera matrix (the clone only sizes the output; cvInv overwrites it)
		CvMat *invK = cvCloneMat(Intrinsic);
		cvInv(Intrinsic, invK);
		// multiply inv(K) by bazProjMat to strip the intrinsics and recover the extrinsic [R|t] part
		cvMatMul(invK, bazProjMat, temps);

		cvmSetIdentity(arRT);
		for(i=0; i<temps->rows; i++)
		{
			for(j=0; j<temps->cols;j++)  cvmSet(arRT, i, j, cvmGet(temps, i,j));
		}
	
		cvReleaseMat(&temps);
		cvReleaseMat(&invK);
	}
Example #29
float MixGaussian::GetProbability(CvMat * Sample)
{
	double P = 0.0;
	CvMat *diffMat = cvCloneMat(Sample);	// same shape as Sample (_nDim x 1)
	CvMat *tmpMat  = cvCloneMat(Sample);	// extra buffer: cvMatMul must not write over its own input
	CvMat *diffMatT = cvCreateMat(1, _nDim, CV_64FC1);
	double expo;
	CvMat expoMat = cvMat(1, 1, CV_64FC1, &expo);

	for(int k = 0; k < _nMixture ; k++) {
		cvSub(Sample, _MeanMat[k], diffMat);			// x - mu
		cvTranspose(diffMat, diffMatT);
		cvMatMul(_CovMatI[k], diffMat, tmpMat);			// Sigma^-1 (x - mu)
		cvMatMul(diffMatT, tmpMat, &expoMat);			// (x - mu)^T Sigma^-1 (x - mu)
		expo *= (-0.5);
		// N(x; mu, Sigma) = exp(-0.5 (x-mu)^T Sigma^-1 (x-mu)) / ((2*pi)^(d/2) * sqrt(|Sigma|));
		// the exponent 1.5 hardcodes d = 3 dimensions
		P += (_Weight[k] * 1.0 / (pow(2 * CV_PI, 1.5) * sqrt(cvDet(_CovMat[k]))) * exp(expo));
	}

	cvReleaseMat(&diffMat);
	cvReleaseMat(&tmpMat);
	cvReleaseMat(&diffMatT);

	return P;
}
Example #30
//Feature matching
void SiftMatch::on_matchButton_clicked()
{
    //If the user picked the horizontal layout radio button
    if(ui->radioButton_horizontal->isChecked())
    {
        //Stack the two images into one, img1 on the left, img2 on the right
        stacked = stack_imgs_horizontal(img1, img2);//composite image showing the matches kept by the distance-ratio test
    }
    else//the user picked the vertical layout radio button
    {
        verticalStackFlag = true;//mark the layout as vertical
        //Stack the two images into one, img1 on top, img2 below
        stacked = stack_imgs( img1, img2 );//composite image showing the matches kept by the distance-ratio test
    }

    //Build a k-d tree from feature set feat1 of image 1; kd_root receives the root
    kd_root = kdtree_build( feat1, n1 );

    Point pt1,pt2;//the two endpoints of a match line
    double d0,d1;//distance from each feature in feat2 to its nearest and second-nearest neighbors
    int matchNum = 0;//number of match pairs kept by the distance-ratio test

    //For each feature feat in feat2, pick the match that passes the distance-ratio test and store it in feat's fwd_match field
    for(int i = 0; i < n2; i++ )
    {
        feat = feat2+i;//pointer to the i-th feature
        //Search kd_root for the 2 nearest neighbors of feat; nbrs receives them, k the number actually found
        int k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
        if( k == 2 )
        {
            d0 = descr_dist_sq( feat, nbrs[0] );//squared distance from feat to its nearest neighbor
            d1 = descr_dist_sq( feat, nbrs[1] );//squared distance from feat to its second-nearest neighbor
            //Accept the match only if d0/d1 is below the threshold NN_SQ_DIST_RATIO_THR
            if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
            {   //take feat and its nearest neighbor as a match pair
                pt2 = Point( cvRound( feat->x ), cvRound( feat->y ) );//point in image 2
                pt1 = Point( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );//point in image 1 (feat's nearest neighbor)
                if(verticalStackFlag)//vertical layout
                    pt2.y += img1->height;//the images are stacked vertically, so add the height of image 1 to pt2's y to get the line's endpoint
                else
                    pt2.x += img1->width;//the images are side by side, so add the width of image 1 to pt2's x to get the line's endpoint
                cvLine( stacked, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 );//draw the connecting line
                matchNum++;//count the match pairs
                feat2[i].fwd_match = nbrs[0];//point feat's fwd_match field at its match
            }
        }
        free( nbrs );//free the neighbor array
    }
    qDebug()<<tr("Match pairs kept by the distance-ratio test:")<<matchNum<<endl;
    //Show and save the distance-ratio-filtered match image

    //cvNamedWindow(IMG_MATCH1);//create a window
    //cvShowImage(IMG_MATCH1,stacked);//show it

    //Save the match image
    QString name_match_DistRatio = name1;//file name: the original with the index digit before the extension replaced by "_match_DistRatio"
    cvSaveImage(name_match_DistRatio.replace( name_match_DistRatio.lastIndexOf(".",-1)-1 , 1 , "_match_DistRatio").toAscii().data(),stacked);


    //Filter the matches with RANSAC and compute the transform matrix H.
    //Whatever the left/right order of img1 and img2, H always maps the features of feat2 to their matches, i.e. points of img2 to the corresponding points of img1
    H = ransac_xform(feat2,n2,FEATURE_FWD_MATCH,lsq_homog,4,0.01,homog_xfer_err,3.0,&inliers,&n_inliers);

    //If the transform could be computed, the two images share a common region
    if( H )
    {
        qDebug()<<tr("Match pairs kept by RANSAC:")<<n_inliers<<endl;

//        //print the H matrix
//        for(int i=0;i<3;i++)
//            qDebug()<<cvmGet(H,i,0)<<cvmGet(H,i,1)<<cvmGet(H,i,2);

        if(verticalStackFlag)//stack the two images into one, img1 on top, img2 below
            stacked_ransac = stack_imgs( img1, img2 );//composite image showing the matches kept by RANSAC
        else//stack the two images into one, img1 on the left, img2 on the right
            stacked_ransac = stack_imgs_horizontal(img1, img2);//composite image showing the matches kept by RANSAC

        //img1LeftBound = inliers[0]->fwd_match->x;//left bound of the matched points' bounding box in image 1
        //img1RightBound = img1LeftBound;//right bound of the matched points' bounding box in image 1
        //img2LeftBound = inliers[0]->x;//left bound of the matched points' bounding box in image 2
        //img2RightBound = img2LeftBound;//right bound of the matched points' bounding box in image 2

        int invertNum = 0;//count of match pairs with pt2.x > pt1.x, used to decide whether img1 is the right-hand image

        //Walk the RANSAC inlier set, find each feature's match, and draw the connecting lines
        for(int i=0; i<n_inliers; i++)
        {
            feat = inliers[i];//the i-th feature
            pt2 = Point(cvRound(feat->x), cvRound(feat->y));//point in image 2
            pt1 = Point(cvRound(feat->fwd_match->x), cvRound(feat->fwd_match->y));//point in image 1 (feat's match)
            //qDebug()<<"pt2:("<<pt2.x<<","<<pt2.y<<")--->pt1:("<<pt1.x<<","<<pt1.y<<")";//print the point pair

            /*find the bounds of the matched region
            if(pt1.x < img1LeftBound) img1LeftBound = pt1.x;
            if(pt1.x > img1RightBound) img1RightBound = pt1.x;
            if(pt2.x < img2LeftBound) img2LeftBound = pt2.x;
            if(pt2.x > img2RightBound) img2RightBound = pt2.x;//*/

            //Track the left/right relation of the matched points to infer the left/right relation of image 1 and image 2
            if(pt2.x > pt1.x)
                invertNum++;

            if(verticalStackFlag)//vertical layout
                pt2.y += img1->height;//the images are stacked vertically, so add the height of image 1 to pt2's y to get the line's endpoint
            else//horizontal layout
                pt2.x += img1->width;//the images are side by side, so add the width of image 1 to pt2's x to get the line's endpoint
            cvLine(stacked_ransac,pt1,pt2,CV_RGB(255,0,255),1,8,0);//draw the connecting line on the match image
        }

        //draw the rectangle bounding the matched points in image 1
        //cvRectangle(stacked_ransac,cvPoint(img1LeftBound,0),cvPoint(img1RightBound,img1->height),CV_RGB(0,255,0),2);
        //draw the rectangle bounding the matched points in image 2
        //cvRectangle(stacked_ransac,cvPoint(img1->width+img2LeftBound,0),cvPoint(img1->width+img2RightBound,img2->height),CV_RGB(0,0,255),2);

        //cvNamedWindow(IMG_MATCH2);//create a window
        //cvShowImage(IMG_MATCH2,stacked_ransac);//show the RANSAC-filtered match image

        //Save the match image
        QString name_match_RANSAC = name1;//file name: the original with the index digit before the extension replaced by "_match_RANSAC"
        cvSaveImage(name_match_RANSAC.replace( name_match_RANSAC.lastIndexOf(".",-1)-1 , 1 , "_match_RANSAC").toAscii().data(),stacked_ransac);


        /*The computed transform H maps points of img2 to points of img1, which assumes img1 is the left image and img2 the right one.
          In that case pt2 in img2 and its counterpart pt1 in img1 mostly satisfy pt2.x < pt1.x.
          If the user opened img1 as the right image and img2 as the left one, the relation flips to mostly pt2.x > pt1.x.
          So comparing the x coordinates of the corresponding points before and after the transform tells us whether img1 is actually the right-hand image.
          If img1 is the right image, transforming img1's matched points by the inverse H_IVT of H yields the matched points of img2.*/

        //If more than 80% of the inliers satisfy pt2.x > pt1.x, conclude that img1 is the right-hand image
        if(invertNum > n_inliers * 0.8)
        {
            qDebug()<<tr("img1 is the right-hand image");
            CvMat * H_IVT = cvCreateMat(3, 3, CV_64FC1);//inverse of the transform matrix
            //cvInvert returns nonzero if the inverse H_IVT was computed successfully
            if( cvInvert(H,H_IVT) )
            {
//                //print H_IVT
//                for(int i=0;i<3;i++)
//                    qDebug()<<cvmGet(H_IVT,i,0)<<cvmGet(H_IVT,i,1)<<cvmGet(H_IVT,i,2);
                cvReleaseMat(&H);//release the transform matrix H, it is no longer needed
                H = cvCloneMat(H_IVT);//copy the data of the inverse H_IVT into H
                cvReleaseMat(&H_IVT);//release the inverse H_IVT
                //swap img1 and img2
                IplImage * temp = img2;
                img2 = img1;
                img1 = temp;
                //cvShowImage(IMG1,img1);
                //cvShowImage(IMG2,img2);
                ui->mosaicButton->setEnabled(true);//enable the panorama stitching button
            }
            else//cvInvert returns 0 when H is not invertible
            {
                cvReleaseMat(&H_IVT);//release the inverse H_IVT
                QMessageBox::warning(this,tr("Warning"),tr("Transform matrix H is not invertible"));
            }
        }
        else
            ui->mosaicButton->setEnabled(true);//enable the panorama stitching button
    }
    else //the transform could not be computed: the two images share no common region
    {
        QMessageBox::warning(this,tr("Warning"),tr("No common region between the two images"));
    }

    ui->radioButton_horizontal->setEnabled(false);//disable the layout radio buttons
    ui->radioButton_vertical->setEnabled(false);
    ui->matchButton->setEnabled(false);//disable the feature-match button
}