Example #1
/** Computes the optical center of a camera from its projection matrix (http://en.wikipedia.org/wiki/Camera_matrix)
 * @param pM : Camera projection matrix (3x4).
 * @return : Optical center of the camera.
 */
Point3D Utility::getOpticalCenter( CvMat* pM )
{
	CvMat *A = cvCreateMat(3, 3, CV_64FC1);
	CvMat *Ainv = cvCreateMat(3, 3, CV_64FC1);
	CvMat *b = cvCreateMat(3, 1, CV_64FC1);
	for(int i=0; i<3; ++i)
	{
		for(int j=0; j<3; ++j)
			cvmSet(A, i, j, cvmGet(pM,i,j));
		cvmSet(b, i, 0, cvmGet(pM, i,3));
	}
	cvInvert(A, Ainv);
	CvMat *oc = cvCreateMat(3, 1, CV_64FC1);
	cvMatMul(Ainv, b, oc);
	Point3D toRet;
	toRet.x = -1 * cvmGet(oc, 0, 0);				//NULL SPACE OF MATRIX pM
	toRet.y = -1 * cvmGet(oc, 1, 0);
	toRet.z = -1 * cvmGet(oc, 2, 0);

	cvReleaseMat(&A);
	cvReleaseMat(&Ainv);
	cvReleaseMat(&b);
	cvReleaseMat(&oc);
	return toRet;
}
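A minimal usage sketch (not from the original source; the hand-made projection matrix below is hypothetical). For P = [A | b], the function returns C = -A^(-1)*b, i.e. the point with P*(C;1) = 0:

	double pdata[12] = { 800,   0, 320, 1000,
	                       0, 800, 240, 2000,
	                       0,   0,   1,   30 };
	CvMat P = cvMat(3, 4, CV_64FC1, pdata);     // header over stack data, no allocation
	Point3D C = Utility::getOpticalCenter(&P);  // null space of P, as noted above
	printf("optical center: (%f, %f, %f)\n", C.x, C.y, C.z);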
Example #2
int Matrix::pseudoinverse(Matrix* matA)
{
	//Preliminary checks
	//To do
	//Pinv calculation
	//Note: cvInvert with CV_SVD computes the pseudo-inverse; for a non-square
	//m x n source the destination must be n x m, so as written this method
	//assumes a square matrix (numFilas == numColumnas)
	CvMat* pinvAux=cvCreateMat(Matrix::numFilas,Matrix::numColumnas,CV_32FC1);
	CvMat* sourceAux=cvCreateMat(Matrix::numFilas,Matrix::numColumnas,CV_32FC1);
	for(int fila=1;fila<=numFilas;fila++)
	{
		for(int columna=1;columna<=numColumnas;columna++)
		{
			//printf("%f\n",sourceAux->data.fl[1]);
			sourceAux->data.fl[(fila-1)*numColumnas+columna-1]=matA->getValueData(fila,columna);
			//printf("%f\n",sourceAux->data.fl[(fila-1)*numColumnas+columna-1]);
		}
	}
	cvInvert(sourceAux,pinvAux,CV_SVD);
	for(int fila=1;fila<=numFilas;fila++)
	{
		for(int columna=1;columna<=numColumnas;columna++)
		{
			Matrix::setValueData(pinvAux->data.fl[(fila-1)*numColumnas+columna-1],fila,columna);
		}
	}

	cvReleaseMat(&sourceAux);
	cvReleaseMat(&pinvAux);
	//End
	return 1;
}
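A hedged side note with a sketch: cvInvert with CV_SVD also computes the Moore-Penrose pseudo-inverse of a non-square matrix, but then the destination must have transposed dimensions, which the method above does not allow for. Hypothetical sizes:

	CvMat* A    = cvCreateMat(4, 3, CV_32FC1);  // 4x3 overdetermined system
	CvMat* pinv = cvCreateMat(3, 4, CV_32FC1);  // its pseudo-inverse is 3x4
	cvSet(A, cvScalarAll(1));                   // placeholder data
	cvInvert(A, pinv, CV_SVD);                  // pinv = pseudo-inverse of A
	cvReleaseMat(&A);
	cvReleaseMat(&pinv);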
Example #3
void glbScreenPoint2GlobePoint(GlbPoint3d p1, GlbRotmat r, GlbPoint3d &p2)
{
	CvMat *rotMat = cvCreateMat(3, 3, CV_32FC1);  // local rotation matrix (avoids shadowing the GlbRotmat type)

	rotMat->data.fl[0] = r.r11;
	rotMat->data.fl[1] = r.r12;
	rotMat->data.fl[2] = r.r13;
	rotMat->data.fl[3] = r.r21;
	rotMat->data.fl[4] = r.r22;
	rotMat->data.fl[5] = r.r23;
	rotMat->data.fl[6] = r.r31;
	rotMat->data.fl[7] = r.r32;
	rotMat->data.fl[8] = r.r33;

	cvInvert(rotMat, rotMat);

	r.r11 = rotMat->data.fl[0];
	r.r12 = rotMat->data.fl[1];
	r.r13 = rotMat->data.fl[2];
	r.r21 = rotMat->data.fl[3];
	r.r22 = rotMat->data.fl[4];
	r.r23 = rotMat->data.fl[5];
	r.r31 = rotMat->data.fl[6];
	r.r32 = rotMat->data.fl[7];
	r.r33 = rotMat->data.fl[8];

	glbGlobePoint2ScreenPoint(p1, r, p2);
	cvReleaseMat(&rotMat);
}
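Side note (not from the original): for a pure rotation matrix the inverse equals the transpose, so the cvInvert call above can be replaced by an exact and cheaper transpose of the same 3x3 matrix:

	CvMat *Rt = cvCreateMat(3, 3, CV_32FC1);
	cvTranspose(rotMat, Rt);   // inv(R) == R^T for an orthonormal R
	cvCopy(Rt, rotMat);
	cvReleaseMat(&Rt);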
Example #4
/*IplImage* HomographyCalculationThread::rectifyImage(IplImage* inImage, IplImage* outImage,double oldFirstApexX,double oldFirstApexY,double width,double height)
{
	double x1 = 0;
	double x2 = 0;
	double x3 = 0;
	double y_position = 0;
	double x_position = 0;
	uchar* data = (uchar *)inImage->imageData;
	uchar* dataOut = (uchar *)outImage->imageData;
	for(int row=0;row<height;row++) {

		for(int col=0;col<width;col++) {

			x1 = cvmGet(mHMatrix,0,0) * col + cvmGet(mHMatrix,0,1) * row + cvmGet(mHMatrix,0,2);
			x2 = cvmGet(mHMatrix,1,0) * col + cvmGet(mHMatrix,1,1) * row + cvmGet(mHMatrix,1,2);
			x3 = cvmGet(mHMatrix,2,0) * col + cvmGet(mHMatrix,2,1) * row + 1;
			y_position = x2/x3 + oldFirstApexY;

			if(inImage->height < y_position) {
				y_position = (inImage->height-1);
			}

			x_position = x1/x3 + oldFirstApexX;
			if(inImage->width < x_position) {
				x_position = (inImage->width-1);
			}

			int temp_y = (int)y_position;
			int temp_x = (int)x_position;

			if(dataOut!=NULL && data!=NULL) {
				dataOut[row*outImage->widthStep+col*outImage->nChannels] = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels];
				dataOut[row*outImage->widthStep+col*outImage->nChannels+1] = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels+1];
				dataOut[row*outImage->widthStep+col*outImage->nChannels+2] = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels+2];
			}
		}
	}
	cvReleaseMat(&mHMatrix);
	return outImage;
}
*/
void HomographyCalculationThread::correctHomographicMatrix(IplImage* inImage,CvMat* invH,double lastPictureApexX,double lastPictureApexY,double width,double height)
{
	CvMat *hCoeff = cvCreateMat(3,3,CV_32FC1);
	CvMat* multipleMat = cvCreateMat(3,3,CV_32FC1);
	double old_height = inImage->height;
	double y_position = lastPictureApexY;
	double x_position = lastPictureApexX;
	for (int i=1;i<10;i++) {
		double x1 = cvmGet(invH,0,0) * (x_position) + cvmGet(invH,0,1) * (y_position) + cvmGet(invH,0,2);
		double x2 = cvmGet(invH,1,0) * (x_position) + cvmGet(invH,1,1) * (y_position) + cvmGet(invH,1,2);
		double x3 = cvmGet(invH,2,0) * (x_position) + cvmGet(invH,2,1) * (y_position) + 1;
		x1 = x1/x3;
		x2 = x2/x3;

		double H_coeff = ((x1/width)+(x2/height))/2 - 0.01;
		for(int coeffRow=0;coeffRow<3;coeffRow++) {
			for(int coeffCol=0;coeffCol<3;coeffCol++) {
				cvmSet(hCoeff,coeffRow,coeffCol,H_coeff);
				cvmSet(multipleMat,coeffRow,coeffCol,cvmGet(mHMatrix,coeffRow,coeffCol));
			}
		}
		cvMul(multipleMat,hCoeff,mHMatrix);
		cvInvert(mHMatrix,invH);
	}

	cvReleaseMat(&multipleMat);
	cvReleaseMat(&hCoeff);
}
Example #5
//! Performs one step of extremum interpolation. 
void interpolateStep(int r, int c, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b, double* xi, double* xr, double* xc )
{
	CvMat* dD, * H, * H_inv, X;
	double x[3] = { 0 };

	dD = deriv3D( r, c, t, m, b );
	H = hessian3D( r, c, t, m, b );
	H_inv = cvCreateMat( 3, 3, CV_64FC1 );
	cvInvert( H, H_inv, CV_SVD );
	cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
	cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 );   // X = -H^(-1) * dD

	cvReleaseMat( &dD );
	cvReleaseMat( &H );
	cvReleaseMat( &H_inv );

	*xi = x[2];
	*xr = x[1];
	*xc = x[0];
}
Example #6
Point2D getOptFlow(IplImage* currentFrame,Point2D p,IplImage* preFrame)
{
	Point2D temp;
	double b[2];
	b[0]=0;b[1]=0;
	
	double M11=0,M12=0,M22=0;
	for(int i = -OPTICAL_FLOW_POINT_AREA/2; i < OPTICAL_FLOW_POINT_AREA/2; i++)
	{
		for (int j = -OPTICAL_FLOW_POINT_AREA/2;j < OPTICAL_FLOW_POINT_AREA/2;j++)
		{
			temp = partial(currentFrame,Point2D(p.row+i,p.col+j));
			M11 += temp.dcol*temp.dcol;
			M12 += temp.dcol*temp.drow;
			M22 += temp.drow*temp.drow;
			b[0] += temp.dcol*(pixval8U(currentFrame,p.row+i,p.col+j)-pixval8U(preFrame,p.row+i,p.col+j));
			b[1] += temp.drow*(pixval8U(currentFrame,p.row+i,p.col+j)-pixval8U(preFrame,p.row+i,p.col+j));
		}
	}
	double a[] = {M11,M12,M12,M22};
	CvMat M=cvMat(2, 2, CV_64FC1, a);
	CvMat *Mi = cvCloneMat(&M);
	cvInvert(&M,Mi,CV_SVD);
	temp.col=0;
	temp.row=0;
	b[0] = -b[0];
	b[1] = -b[1];
	CvMat Mb = cvMat(2,1,CV_64FC1,b);
	CvMat *Mr = cvCloneMat(&Mb);
	cvMatMul( Mi, &Mb, Mr);
	double vy = (cvmGet(Mr,1,0));
	double vx = (cvmGet(Mr,0,0));
	cvReleaseMat(&Mi);
	cvReleaseMat(&Mr);

	return (Point2D(vy,vx));
}
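A hedged alternative for the 2x2 system above: cvSolve computes v = M^(-1)*b without forming the explicit inverse, which is the usual way to solve M*v = b (same M and Mb as in the function body):

	CvMat *v = cvCloneMat(&Mb);
	cvSolve(&M, &Mb, v, CV_SVD);   // robust to a near-singular structure tensor M
	double vx = cvmGet(v, 0, 0);
	double vy = cvmGet(v, 1, 0);
	cvReleaseMat(&v);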
Example #7
Ematrix Ematrix::inverse() const{
	int r,c;
	if (rows != cols) { printf("INV: Matrix must be square\n");}
	assert(rows == cols);
	Ematrix mat(rows,cols,reservedRows,reservedCols);
	CvMat* ocvMat = cvCreateMat(rows,cols,CV_32FC1);
	int id=0;
	for (r = 0; r<rows; r++){
		for ( c = 0; c<cols; c++)
			cvmSet(ocvMat,r,c, val[id+c]);	
		id += reservedCols;
	}

	CvMat* ocvMatInv = cvCreateMat(rows,cols,CV_32FC1);
	cvInvert(ocvMat, ocvMatInv, CV_LU );
	id = 0;
	for (r = 0; r<rows; r++){
		for (c = 0; c<cols; c++)
			mat.val[id+c] = (float)cvmGet(ocvMatInv,r,c);
		id += reservedCols;
	}
	cvReleaseMat(&ocvMat);
	cvReleaseMat(&ocvMatInv);
	return mat;
}
Example #8
//============================================================================
void AAM_IC::CalcHessian(CvMat* H, const CvMat* SD)
{
	CvMat* HH = cvCreateMat(H->rows, H->cols, CV_64FC1);
	cvMulTransposed(SD, HH, 0);// Equation (65)
	cvInvert(HH, H, CV_SVD);
	cvReleaseMat(&HH);
}
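A minimal usage sketch under assumed sizes (not from the original; aam_ic is a hypothetical AAM_IC instance): with n model parameters and m sampled pixels, SD is n x m, so the matrix stored in H, which is really the inverse of the Gauss-Newton Hessian SD*SD^T, is n x n:

	CvMat* SD = cvCreateMat(4, 100, CV_64FC1);  // hypothetical steepest-descent images
	CvMat* H  = cvCreateMat(4, 4, CV_64FC1);
	/* ... fill SD ... */
	aam_ic.CalcHessian(H, SD);                  // H = inv(SD * SD^T), Equation (65)
	cvReleaseMat(&SD);
	cvReleaseMat(&H);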
Example #9
void blur_function(const IplImage *latent_image, IplImage *blur_image, const CvMat *hom1, const CvMat *hom2)
{
	const int T = 20;
	const int tau = 10;
	CvMat *id_mat = cvCreateMat(3, 3, CV_32FC1);
	cvSetIdentity(id_mat, cvRealScalar(1));
	CvMat *invhom1 = cvCreateMat(3, 3, CV_32FC1);
	cvInvert(hom1, invhom1, CV_LU);
	
	CvMat *h1 = cvCreateMat(3, 3, CV_32FC1);
	CvMat *h2 = cvCreateMat(3, 3, CV_32FC1);
	CvSize size = cvSize(latent_image->width, latent_image->height);
	IplImage *temp = cvCreateImage(size, latent_image->depth, latent_image->nChannels);
	IplImage *blur = cvCreateImage(size, IPL_DEPTH_32F, latent_image->nChannels);
	cvSetZero(blur);
	
	for (int i = 1; i <= tau; ++i)
	{
		cvAddWeighted(id_mat, (double)(T-i)/T, invhom1, (double)i/T, 0, h1);
		cvAddWeighted(id_mat, (double)(T-i)/T, hom2, (double)i/T, 0, h2);
		cvWarpPerspective(latent_image, temp, h1, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		cvAdd(blur, temp, blur, NULL);
		cvWarpPerspective(latent_image, temp, h2, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		cvAdd(blur, temp, blur, NULL);
	}
	cvAdd(blur, latent_image, blur, NULL);
	cvConvertScale(blur, blur_image, 1.0/(2*tau+1), 0);
	
	cvReleaseMat(&id_mat);
	cvReleaseMat(&invhom1);
	cvReleaseMat(&h1);
	cvReleaseMat(&h2);
	cvReleaseImage(&temp);
	cvReleaseImage(&blur);
}
Example #10
//============================================================================
void AAM_Basic::CalcGradientMatrix(const CvMat* CParams, 
								   const CvMat* vCDisps,
								   const CvMat* vPoseDisps,
								   const std::vector<AAM_Shape>& AllShapes, 
								   const std::vector<IplImage*>& AllImages)
{
	int npixels = __cam.__texture.nPixels();
	int np = __cam.nModes();

	// do model parameter experiments
    {
		printf("Calculating parameter gradient matrix...\n");
		CvMat* GParam = cvCreateMat(np, npixels, CV_64FC1);
		cvZero(GParam);
        CvMat* GtG = cvCreateMat(np, np, CV_64FC1);
        CvMat* GtGInv = cvCreateMat(np, np, CV_64FC1);
        
		// estimate Rc
		EstCParamGradientMatrix(GParam, CParams, AllShapes, AllImages, vCDisps);
        __Rc = cvCreateMat(np, npixels, CV_64FC1);
        cvGEMM(GParam, GParam, 1, NULL, 0, GtG, CV_GEMM_B_T);
        cvInvert(GtG, GtGInv, CV_SVD );
		cvMatMul(GtGInv, GParam, __Rc);
        
		cvReleaseMat(&GtG);
		cvReleaseMat(&GtGInv);
		cvReleaseMat(&GParam);
    }

	// do pose experiments, this is for global shape normalization
    {
		printf("Calculating pose gradient matrix...\n");
		CvMat* GtG = cvCreateMat(4, 4, CV_64FC1);
        CvMat* GtGInv = cvCreateMat(4, 4, CV_64FC1);
        CvMat* GPose = cvCreateMat(4, npixels, CV_64FC1);
        cvZero(GPose);

        // estimate Rt
        EstPoseGradientMatrix(GPose, CParams, AllShapes, AllImages, vPoseDisps);
        __Rq = cvCreateMat(4, npixels, CV_64FC1);
        cvGEMM(GPose, GPose, 1, NULL, 0, GtG, CV_GEMM_B_T);
        cvInvert(GtG, GtGInv, CV_SVD);
		cvMatMul(GtGInv, GPose, __Rq);
        
		cvReleaseMat(&GtG);
		cvReleaseMat(&GtGInv);
		cvReleaseMat(&GPose);
    }
}
Example #11
void KalmanSensor::update_K(CvMat *P_pred) {
	// K = P * trans(H) * inv(H*P*trans(H) + R)
	cvTranspose(H, H_trans);
	cvMatMul(P_pred, H_trans, K);
	cvMatMul(H, K, R_tmp);
	cvScaleAdd(R_tmp, cvScalar(1), R, R_tmp);
	cvInvert(R_tmp, R_tmp);
	cvMatMul(H_trans, R_tmp, K);
	cvMatMul(P_pred, K, K);
}
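A step-by-step reading of the buffer reuse above (a note, not from the original; n states, m measurements):

	// K     = P_pred * H^T                       n x m
	// R_tmp = H * K + R = H * P_pred * H^T + R   m x m, the innovation covariance S
	// R_tmp = inv(S)
	// K     = H^T * inv(S);  K = P_pred * K   => K = P_pred * H^T * inv(S)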
Example #12
int main (int argc, char **argv)
{
	int i, j;
	int nrow = 3;
	int ncol = 3;
	CvMat *src, *dst, *mul;
	double det;
	CvRNG rng = cvRNG (time (NULL));      /* initialize the random number generator */
	// (1) allocate the matrices
	src = cvCreateMat (nrow, ncol, CV_32FC1);
	dst = cvCreateMat (ncol, nrow, CV_32FC1);
	mul = cvCreateMat (nrow, nrow, CV_32FC1);
	// (2) fill matrix src with random values
	printf ("src\n");
	cvmSet (src, 0, 0, 1);  // (immediately overwritten by the loop below)
	for (i = 0; i < src->rows; i++)
	{
		for (j = 0; j < src->cols; j++)
		{
			cvmSet (src, i, j, cvRandReal (&rng));
			printf ("% lf\t", cvmGet (src, i, j));
		}
		printf ("\n");
	}
	// (3) compute the inverse of src and store it in dst
	det = cvInvert (src, dst, CV_SVD);
	// (4) print the value returned by cvInvert; note that with CV_SVD this is
	// the inverse condition number of src rather than its determinant (the
	// determinant is returned only with CV_LU)
	printf ("det(src)=%lf\n", det);
	// (5) print dst
	printf ("dst\n");
	for (i = 0; i < dst->rows; i++)
	{
		for (j = 0; j < dst->cols; j++)
		{
			printf ("% lf\t", cvmGet (dst, i, j));
		}
		printf ("\n");
	}
	// (6) multiply src by dst to verify (the product should be close to the identity)
	cvMatMul (src, dst, mul);
	printf ("mul\n");
	for (i = 0; i < mul->rows; i++)
	{
		for (j = 0; j < mul->cols; j++)
		{
			printf ("% lf\t", cvmGet (mul, i, j));
		}
		printf ("\n");
	}
	// (7) release the matrices
	cvReleaseMat (&src);
	cvReleaseMat (&dst);
	cvReleaseMat (&mul);
	return 0;
}
Example #13
PerspectiveTransform * PerspectiveTransform::accumulateInverse(const Transform * T2) const
{
    PerspectiveTransform * m = new PerspectiveTransform;
    PerspectiveTransform T2_inv(*dynamic_cast<const PerspectiveTransform *>(T2)); //make a copy

    cvInvert(T2_inv, T2_inv);

    cvMatMul(T2_inv, *this, *m); // no casts needed: OpenCV checks that the dimensions agree
    matMul(T2_inv.data.db, data.db, m->data.db, 3, 3, 3); // note: recomputes the same product into m; one of these two calls is redundant
    return m;
}
Example #14
CvMat* HomographyCalculationThread::calculateHomographicMatrix(double newPictureApexX[],double newPictureApexY[],double pictureApexXPosition[],double pictureApexYPosition[])
{
	CvMat* mmat = cvCreateMat(3,3,CV_32FC1);
	CvMat* a = cvCreateMat(POINTS*2,9,CV_32FC1);
	for(int count=1;count<POINTS+1;count++) {
		cvmSet(a,2*count-2,0,newPictureApexX[count-1]);
		cvmSet(a,2*count-2,1,newPictureApexY[count-1]);
		cvmSet(a,2*count-2,2,1);
		cvmSet(a,2*count-2,3,0);
		cvmSet(a,2*count-2,4,0);
		cvmSet(a,2*count-2,5,0);
		cvmSet(a,2*count-2,6,(-newPictureApexX[count-1]*pictureApexXPosition[count-1]));
		cvmSet(a,2*count-2,7,(-pictureApexXPosition[count-1]*newPictureApexY[count-1]));
		cvmSet(a,2*count-2,8,-pictureApexXPosition[count-1]);
		cvmSet(a,2*count-1,0,0);
		cvmSet(a,2*count-1,1,0);
		cvmSet(a,2*count-1,2,0);
		cvmSet(a,2*count-1,3,newPictureApexX[count-1]);
		cvmSet(a,2*count-1,4,newPictureApexY[count-1]);
		cvmSet(a,2*count-1,5,1);
		cvmSet(a,2*count-1,6,(-newPictureApexX[count-1]*pictureApexYPosition[count-1]));
		cvmSet(a,2*count-1,7,(-pictureApexYPosition[count-1]*newPictureApexY[count-1]));
		cvmSet(a,2*count-1,8,-pictureApexYPosition[count-1]);
	}
	CvMat* U  = cvCreateMat(8,8,CV_32FC1);
	CvMat* D  = cvCreateMat(8,9,CV_32FC1);
	CvMat* V  = cvCreateMat(9,9,CV_32FC1);
	CvMat* V22 = cvCreateMat(3,3,CV_32FC1);
	mHMatrix = cvCreateMat(3,3,CV_32FC1);
	CvMat* invH = cvCreateMat(3,3,CV_32FC1);
	cvSVD(a, D, U, V, CV_SVD_U_T|CV_SVD_V_T);

	for(int a=0;a<3;a++) {
		for(int b=0;b<3;b++) {
			cvmSet(mmat,a,b,cvmGet(V,8,a*3+b));
			cvmSet(V22,a,b,(1/cvmGet(V,8,4)));
		}
	}

	cvMul(mmat,V22,mHMatrix);
	cvInvert(mHMatrix,invH);
	cvReleaseMat(&U);
	cvReleaseMat(&D);
	cvReleaseMat(&V);
	cvReleaseMat(&V22);
	cvReleaseMat(&a);
	cvReleaseMat(&mmat);
	return invH;
}
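A hedged alternative sketch: OpenCV's built-in estimator computes the same least-squares homography from the point pairs (array layout assumed; POINTS as used above):

	CvMat* srcPts = cvCreateMat(POINTS, 2, CV_32FC1);
	CvMat* dstPts = cvCreateMat(POINTS, 2, CV_32FC1);
	for(int i = 0; i < POINTS; i++) {
		cvmSet(srcPts, i, 0, newPictureApexX[i]);
		cvmSet(srcPts, i, 1, newPictureApexY[i]);
		cvmSet(dstPts, i, 0, pictureApexXPosition[i]);
		cvmSet(dstPts, i, 1, pictureApexYPosition[i]);
	}
	cvFindHomography(srcPts, dstPts, mHMatrix);  // default method: least-squares DLT
	cvReleaseMat(&srcPts);
	cvReleaseMat(&dstPts);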
Example #15
void ConformalResizing::Constrian(const ConstrainUnits& unit, CvMat*& M)
{
	// Preprocess unit to make Matrix M less singular
	double meanX(0), meanY(0);
	for (int i = 0; i < unit.n; i++)
	{
		meanX += unit.pnts[i].x;
		meanY += unit.pnts[i].y;
	}
	meanX /= unit.n;
	meanY /= unit.n;

	int n = unit.n * 2;
	M = cvCreateMat(n, n, CV_64F);
	CvMat* A = cvCreateMat(n, 4, CV_64F);
	CvMat* Q = cvCreateMat(n, 4, CV_64F);
	CvMat* P = cvCreateMat(4, 4, CV_64F);

	// Initialize A
	cvZero(A);
	for (int i = 0; i < unit.n; i++)
	{
		double x = unit.pnts[i].x - meanX;
		double y = unit.pnts[i].y - meanY;
		CV_MAT_ELEM(*A, double, 2*i, 0) = x;
		CV_MAT_ELEM(*A, double, 2*i, 1) = -y;
		CV_MAT_ELEM(*A, double, 2*i, 2) = 1;

		CV_MAT_ELEM(*A, double, 2*i+1, 0) = y;
		CV_MAT_ELEM(*A, double, 2*i+1, 1) = x;
		CV_MAT_ELEM(*A, double, 2*i+1, 3) = 1;
	}
	cvMulTransposed(A, P, 1); // P = (A^T * A)
	cvInvert(P, P, CV_SVD_SYM); // P = (A^T * A)^(-1)
	cvMatMul(A, P, Q); 
	cvGEMM(Q, A, 1, NULL, 0, M, CV_GEMM_B_T);

	// M = M - I
	double* d = M->data.db;
	for (int i = 0; i < n; i++, d += n+1)
	{
		*d -= 1;
	}

	cvReleaseMat(&A);
	cvReleaseMat(&Q);
	cvReleaseMat(&P);
}
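In brief (a note, not from the original): the code builds M = A*(A^T*A)^(-1)*A^T - I, the residual of the orthogonal projection onto the column space of A:

	// M*x == 0 exactly when the 2n-vector x of point coordinates is a
	// similarity transform (rotation + uniform scale + translation) of the
	// mean-centered unit; ||M*x|| penalizes non-conformal deformation.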
Example #16
double pkmGaussianMixtureModel::multinormalDistribution(const CvMat *pts, const CvMat *mean, const CvMat *covar)
{
	
	int dimensions = 2;
	//  add a tiny bit because of small samples
	CvMat *covarShifted = cvCreateMat(2, 2, CV_64FC1);
	cvAddS( covar, cvScalarAll(0.001), covarShifted);
	
	// calculate the determinant
	double det = cvDet(covarShifted);
	
	// invert covariance
	CvMat *covarInverted = cvCreateMat(2, 2, CV_64FC1);
	cvInvert(covarShifted, covarInverted);
	
	double ff = (1.0/(2.0*(double)PI))*(pow(det,-0.5));
	
	CvMat *centered = cvCreateMat(2, 1, CV_64FC1);
	cvSub(pts, mean, centered);
	
	CvMat *invxmean = cvCreateMat(2, 1, CV_64FC1);
	//cvGEMM(covarInverted, centered, 1., NULL, 1., invxmean);
	cvMatMul(covarInverted, centered, invxmean);
	
	cvMul(centered, invxmean, invxmean);
	CvScalar sum = cvSum(invxmean);
	/*
	 printf("covar: %f %f %f %f\n", cvmGet(covar, 0, 0), cvmGet(covar, 0, 1), cvmGet(covar, 1, 0), cvmGet(covar, 1, 1));
	 printf("covarShifted: %f %f %f %f\n", cvmGet(covarShifted, 0, 0), cvmGet(covarShifted, 0, 1), cvmGet(covarShifted, 1, 0), cvmGet(covarShifted, 1, 1));
	 printf("det: %f\n", det);
	 printf("covarInverted: %f %f %f %f\n", cvmGet(covarInverted, 0, 0), cvmGet(covarInverted, 0, 1), cvmGet(covarInverted, 1, 0), cvmGet(covarShifted, 1, 1));
	 printf("ff: %f\n", ff);
	 printf("pts: %f %f)\n", cvmGet(pts, 0, 0), cvmGet(pts, 1, 0));
	 printf("mean: %f %f)\n", cvmGet(mean, 0, 0), cvmGet(mean, 1, 0));
	 printf("centered: %f %f)\n", cvmGet(centered, 0, 0), cvmGet(centered, 1, 0));
	 printf("invxmean: %f %f)\n", cvmGet(invxmean, 0, 0), cvmGet(invxmean, 1, 0));
	 printf("scalar: %f %f %f %f\n", sum.val[0], sum.val[1], sum.val[2], sum.val[3]);
	 */
	cvReleaseMat(&covarShifted);
	cvReleaseMat(&covarInverted);
	cvReleaseMat(&centered);
	cvReleaseMat(&invxmean);
	
	return ff * exp(-0.5*sum.val[0]);
	
}
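The same density via the Mahalanobis distance, as a hedged fragment reusing the function's locals (d = 2):

	// N(x; mu, S) = (2*pi)^(-d/2) * det(S)^(-1/2) * exp(-0.5*(x-mu)^T * inv(S) * (x-mu))
	double dM = cvMahalanobis(pts, mean, covarInverted);  // takes the *inverse* covariance
	double p  = (1.0 / (2.0 * CV_PI)) * pow(det, -0.5) * exp(-0.5 * dM * dM);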
Example #17
bool MixGaussian::AddGaussian(
		CvMat * MeanMat,
		CvMat * CovMat,
		float Weight)
{
	if(_nMixture == MAX_NUM_GAUSSIAN)
		return false;

	_MeanMat[_nMixture] = cvCloneMat(MeanMat);
	_CovMat [_nMixture] = cvCloneMat(CovMat);
	_CovMatI[_nMixture] = cvCloneMat(CovMat);
	cvInvert(CovMat, _CovMatI[_nMixture]);
	_Weight [_nMixture] = Weight;

	_nMixture++;

	return true;
}
Example #18
AffineTransform * AffineTransform::accumulateInt(const Transform * T2, bool bInvert) const
{
    AffineTransform * m = new AffineTransform;

    // Just expand mat. multiplication to avoid converting to 3x3
    const double * s1 = 0, * s2 = 0;
    double invMatData[3*3]; //here so stays in scope when pointer copied
    if(bInvert)
    {
        //Invert 3x3 mat
        double T2MatData[3*3];
        CvMat invMat = cvMat(3, 3, CV_64FC1, invMatData);
        CvMat T2Mat = cvMat(3, 3, CV_64FC1, T2MatData);

        const double * affineT2Data = dynamic_cast<const AffineTransform *>(T2)->data_;
        for(int i=0; i<6; i++)
            T2MatData[i] = affineT2Data[i];
        
        //Last row of affine mat
        T2MatData[6] = 0;
        T2MatData[7] = 0;
        T2MatData[8] = 1;

        cvInvert(&T2Mat, &invMat);

        s1 = invMatData;
        s2 = this->data_;
    }
    else
    {
        s1 = this->data_;
        s2 = dynamic_cast<const AffineTransform *>(T2)->data_;
    }

    for(int row=0; row<2; row++)
    {
        (*m)(row,0) = s1[0]*s2[0] + s1[1]*s2[0+3];
        (*m)(row,1) = s1[0]*s2[1] + s1[1]*s2[1+3];
        (*m)(row,2) = s1[0]*s2[2] + s1[1]*s2[2+3] + s1[2];
        s1 += COLS; // skip to the second row
    }
    return m;
}
Example #19
matrix matrix::inverse() const{
	int r;
	if (rows != cols) { printf("INV: Matrix must be square\n");}
	assert(rows == cols);
	matrix mat(rows,cols);
	CvMat* ocvMat = cvCreateMat(rows,cols,CV_32FC1);
	for (r = 0; r<rows; r++)
		for (int c = 0; c<cols; c++)
			cvmSet(ocvMat,r,c,get(r,c));	
	
	CvMat* ocvMatInv = cvCreateMat(rows,cols,CV_32FC1);
	cvInvert(ocvMat, ocvMatInv, CV_LU );
	for (r = 0; r<rows; r++)
		for (int c = 0; c<cols; c++)
			mat.set(r,c, (float)cvmGet(ocvMatInv,r,c));
	cvReleaseMat(&ocvMat);
	cvReleaseMat(&ocvMatInv);
	return mat;
}
Example #20
//! Performs one step of extremum interpolation. 
void FastHessian::interpolateStep(int r, int c, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b, 
                                  double* xi, double* xr, double* xc )
{
  CvMat* dD, * H, * H_inv, X;
  double x[3] = { 0 };

  dD = deriv3D( r, c, t, m, b );
  H = hessian3D( r, c, t, m, b );
  H_inv = cvCreateMat( 3, 3, CV_64FC1 );
  cvInvert( H, H_inv, CV_SVD );
  cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
  cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 );

  cvReleaseMat( &dD );
  cvReleaseMat( &H );
  cvReleaseMat( &H_inv );

  *xi = x[2];
  *xr = x[1];
  *xc = x[0];
}
Example #21
void interp_step( IplImage*** dog_pyr, int octv, int intvl, int r, int c,
				 double* xi, double* xr, double* xc )
{
	CvMat* dD, * H, * H_inv, X;
	double x[3] = { 0 };

	dD = deriv_3D( dog_pyr, octv, intvl, r, c );
	H = hessian_3D( dog_pyr, octv, intvl, r, c );
	H_inv = cvCreateMat( 3, 3, CV_64FC1 );
	cvInvert( H, H_inv, CV_SVD );
	cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
	cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 );

	cvReleaseMat( &dD );
	cvReleaseMat( &H );
	cvReleaseMat( &H_inv );

	*xi = x[2];
	*xr = x[1];
	*xc = x[0];
}
Example #22
static double luck_pixel(int x, int y, const CvMat *hom1, const CvMat *hom2)
{
	CvMat *invhom1 = cvCreateMat(3, 3, CV_32FC1);
	cvInvert(hom1, invhom1, CV_LU);
	// invhom1 maps points back to the previous frame
	
	CvPoint2D32f src = cvPoint2D32f(x, y);
	CvPoint2D32f d1, d2;
	CvMat pt_src = cvMat(1, 1, CV_32FC2, &src);
	CvMat pt_dst = cvMat(1, 1, CV_32FC2, &d1);
	cvPerspectiveTransform(&pt_src, &pt_dst, invhom1);
	// perspective-transform into the previous frame
	pt_dst = cvMat(1, 1, CV_32FC2, &d2);
	cvPerspectiveTransform(&pt_src, &pt_dst, hom2);
	// perspective-transform into the next frame
	// d1 and d2 are the corresponding points in the previous and next frames
	double dis = (src.x-d1.x)*(src.x-d1.x)+(src.y-d1.y)*(src.y-d1.y);
	dis += (src.x-d2.x)*(src.x-d2.x)+(src.y-d2.y)*(src.y-d2.y);
	double luck = exp(-dis/(2*SIGMA_L*SIGMA_L));
	cvReleaseMat(&invhom1);
	return luck;
}
Example #23
/*
Performs one step of extremum interpolation.  Based on Eqn. (3) in Lowe's paper.
@param dog_pyr difference of Gaussians scale space pyramid
@param octv octave of scale space
@param intvl interval being interpolated
@param r row being interpolated
@param c column being interpolated
@param xi output as interpolated subpixel increment to interval
@param xr output as interpolated subpixel increment to row
@param xc output as interpolated subpixel increment to col
*/
static void interp_step( IplImage*** dog_pyr, int octv, int intvl, int r, int c,
						 double* xi, double* xr, double* xc )
{
	CvMat* dD, * H, * H_inv, X;
	double x[3] = { 0 };

    // compute the partial derivatives along x, y, and scale at the point in the
    // DoG pyramid; the result is stored in the column vector dD
	dD = deriv_3D( dog_pyr, octv, intvl, r, c );
    // compute the 3x3 Hessian of the point in the DoG pyramid
	H = hessian_3D( dog_pyr, octv, intvl, r, c );
    H_inv = cvCreateMat( 3, 3, CV_64FC1 );  // inverse of the Hessian
	cvInvert( H, H_inv, CV_SVD );
	cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
    // X = -H^(-1) * dD; the three elements of X are the offsets along x, y,
    // and sigma (see Eqn. (3) of Lowe's paper)
	cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 );

	cvReleaseMat( &dD );
	cvReleaseMat( &H );
	cvReleaseMat( &H_inv );

    *xi = x[2];  // offset along sigma (the interval direction)
    *xr = x[1];  // offset along y (row)
    *xc = x[0];  // offset along x (column)
}
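A hedged equivalent that skips the explicit inverse: inside the function above, the cvInvert/cvGEMM pair could be replaced by a direct solve of H*X = dD followed by a sign flip:

	cvSolve( H, dD, &X, CV_SVD );  // X = H^(-1) * dD
	cvScale( &X, &X, -1, 0 );      // X = -H^(-1) * dD, as in the cvGEMM call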
Example #24
int opticaltri( CvMat * &clean_texture, int verts )
{
	char * im1fname = "conhull-dirty-thresh.jpg";
	char * im2fname = "conhull-clean-thresh.jpg";

	int count = MAX_COUNT;
	char * status;
	
	CvPoint2D32f * source_points;
	CvPoint2D32f * dest_points;
	CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));

	// count = opticalflow( im1fname, im2fname, source_points, dest_points, status ); 
	count = findsiftpoints( "conhull-dirty.jpg", "conhull-clean.jpg", source_points, dest_points, status ); 

	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);

	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);

	cvSet( image1, cvScalarAll(255) );

	std::map<CvPoint, CvPoint> point_lookup_map;
	std::vector<std::pair<CvPoint, CvPoint> > point_lookup;

	int num_matches = 0;
	int num_out_matches = 0;
	int max_dist = 50;
	int offset = 200;	

	// put corners in the point lookup as going to themselves
	point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
	point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
	point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
	point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);

	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));

	printf("Inserting corners...");
	// put corners in the Delaunay subdivision
	for(unsigned int i = 0; i < point_lookup.size(); i++) {
		cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
	}
	printf("done.\n");

	CvSubdiv2DEdge proxy_edge;
	for(int i = 0; i < count; i++) {
		if(status[i]) {
			CvPoint source = cvPointFrom32f(source_points[i]);
			CvPoint dest = cvPointFrom32f(dest_points[i]);
	
			if((((int)fabs((double)(source.x - dest.x))) > max_dist) ||
				 (((int)fabs((double)(source.y - dest.y))) > max_dist)) {	
				num_out_matches++;
			}
			else if((dest.x >= 0) && (dest.y >= 0) && (dest.x < (image1->width)) && (dest.y < (image1->height))) {
				if(point_lookup_map.find(source) == point_lookup_map.end()) {
					num_matches++;
				
					point_lookup_map[source] = dest;
					point_lookup.push_back(std::pair<CvPoint,CvPoint>(source,dest));
					// delaunay_points[i] = 
					(cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(source) ))->pt;
					cvSetImageROI( image1, cvRect(source.x-8,source.y-8,8*2,8*2) );
					cvResetImageROI( image2 );
					cvGetRectSubPix( image2, image1, dest_points[i] );
				}
				/*
				cvSet2D( image1, source.y, source.x, cvGet2D( image2, dest.y, dest.x ) );
				cvSet2D( image1, source.y, source.x+1, cvGet2D( image2, dest.y, dest.x+1 ) );
				cvSet2D( image1, source.y, source.x-1, cvGet2D( image2, dest.y, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x, cvGet2D( image2, dest.y+1, dest.x ) );
				cvSet2D( image1, source.y-1, source.x, cvGet2D( image2, dest.y-1, dest.x ) );
				cvSet2D( image1, source.y+1, source.x+1, cvGet2D( image2, dest.y+1, dest.x+1 ) );
				cvSet2D( image1, source.y-1, source.x-1, cvGet2D( image2, dest.y-1, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x-1, cvGet2D( image2, dest.y+1, dest.x-1 ) );
				cvSet2D( image1, source.y-1, source.x+1, cvGet2D( image2, dest.y-1, dest.x+1 ) );
				*/

				// cvCircle( image1, source, 4, CV_RGB(255,0,0), 2, CV_AA );
				// cvCircle( image2, dest, 4, CV_RGB(255,0,0), 2, CV_AA );
			}

			/*
			cvSetImageROI( image1, cvRect(source.x-offset,source.y-offset,offset*2,offset*2) );
			cvSetImageROI( image2, cvRect(dest.x-offset,dest.y-offset,offset*2,offset*2) );
			cvNamedWindow("image1",0);
			cvNamedWindow("image2",0);
			cvShowImage("image1",image1);
			cvShowImage("image2",image2);
			printf("%d,%d -> %d,%d\n",source.x,source.y,dest.x,dest.y);
			cvWaitKey(0);
			cvDestroyAllWindows();
			*/
		}
	}
	printf("%d %d\n",num_matches,num_out_matches);
	printf("%d lookups\n",point_lookup_map.size());

	cvResetImageROI( image1 );

	cvSaveImage("sparse.jpg", image1);

	cvReleaseImage(&image1);
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	cvSet( image1, cvScalarAll(255) );
	printf("Warping image...");

	CvSeqReader  reader;
	int total = delaunay->edges->total;
	int elem_size = delaunay->edges->elem_size;


	std::vector<Triangle> trivec;
	std::vector<CvMat *> baryinvvec;

	for( int i = 0; i < total*2; i++ ) {
		if((i == 0) || (i == total)) {
			cvStartReadSeq( (CvSeq*)(delaunay->edges), &reader, 0 );
		}
		CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

		if( CV_IS_SET_ELEM( edge ))	{
			CvSubdiv2DEdge curedge = (CvSubdiv2DEdge)edge;
			CvSubdiv2DEdge t = curedge;
			Triangle temptri;
			int count = 0;
			
			// construct a triangle from this edge
			do {
				CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
				if(count < 3) {
					pt->pt.x = pt->pt.x >= image1->width ? image1->width-1 : pt->pt.x;
					pt->pt.y = pt->pt.y >= image1->height ? image1->height-1 : pt->pt.y;
					pt->pt.x = pt->pt.x < 0 ? 0 : pt->pt.x;
					pt->pt.y = pt->pt.y < 0 ? 0 : pt->pt.y;

					temptri.points[count] = cvPointFrom32f( pt->pt );
				}
				else {
					printf("More than 3 edges\n");
				}
				count++;
				if(i < total)
					t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
				else
					t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_RIGHT );
			} while( t != curedge );
			
			// check that triangle is not already in
			if( std::find(trivec.begin(), trivec.end(), temptri) == trivec.end() ) {
				// push triangle in and draw
				trivec.push_back(temptri);
				cvLine( image1, temptri.points[0], temptri.points[1], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[1], temptri.points[2], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[2], temptri.points[0], CV_RGB(255,0,0), 1, CV_AA, 0 );

				// compute barycentric computation vector for this triangle
				CvMat * barycen = cvCreateMat( 3, 3, CV_32FC1 );
				CvMat * baryceninv = cvCreateMat( 3, 3, CV_32FC1 );

				barycen->data.fl[3*0+0] = temptri.points[0].x;
				barycen->data.fl[3*0+1] = temptri.points[1].x;
				barycen->data.fl[3*0+2] = temptri.points[2].x;
				barycen->data.fl[3*1+0] = temptri.points[0].y;
				barycen->data.fl[3*1+1] = temptri.points[1].y;
				barycen->data.fl[3*1+2] = temptri.points[2].y;
				barycen->data.fl[3*2+0] = 1;
				barycen->data.fl[3*2+1] = 1;
				barycen->data.fl[3*2+2] = 1;

				cvInvert( barycen, baryceninv, CV_LU );
				baryinvvec.push_back(baryceninv);

				cvReleaseMat( &barycen );
			}
		}

		CV_NEXT_SEQ_ELEM( elem_size, reader );
	}
	printf("%d triangles...", trivec.size());
	cvSaveImage("triangles.jpg", image1);
	
	cvSet( image1, cvScalarAll(255) );
	IplImage * clean_nonthresh = cvLoadImage( "conhull-clean.jpg", CV_LOAD_IMAGE_COLOR );

	// for each triangle
	for(unsigned int i = 0; i < trivec.size(); i++) {
		Triangle curtri = trivec[i];
		CvMat * curpoints = cvCreateMat( 1, 3, CV_32SC2 );
		Triangle target;
		std::map<CvPoint,CvPoint>::iterator piter[3];
		
		printf("Triangle %d / %d\n",i,trivec.size());
		int is_corner = 0;
		for(int j = 0; j < 3; j++) {
			/*
			curpoints->data.i[2*j+0] = curtri.points[j].x;
			curpoints->data.i[2*j+1] = curtri.points[j].y;
			*/
			CV_MAT_ELEM( *curpoints, CvPoint, 0, j ) = curtri.points[j];
			printf("%d,%d\n",curtri.points[j].x,curtri.points[j].y);
	
			if((curtri.points[j] == cvPoint(0,0)) ||  (curtri.points[j] == cvPoint(0,image1->height - 1)) ||(curtri.points[j] == cvPoint(image1->width - 1,0)) ||(curtri.points[j] == cvPoint(image1->width - 1,image1->height - 1))) {
				is_corner++;
			}
			

			for(unsigned int k = 0; k < point_lookup.size(); k++) {
				std::pair<CvPoint,CvPoint> thispair = point_lookup[k];
				if(thispair.first == curtri.points[j]) {
					target.points[j] = thispair.second;
					break;
				}
			}

			/*
			piter[j] = point_lookup_map.find(curtri.points[j]);
			if(piter[j] != point_lookup_map.end() ) {
				target.points[j] = piter[j]->second;
			}
			*/
		}
			
		// if((piter[0] != point_lookup_map.end()) && (piter[1] != point_lookup_map.end()) && (piter[2] != point_lookup_map.end())) {
		if(is_corner < 3) {
			CvMat * newcorners = cvCreateMat( 3, 3, CV_32FC1 );
			newcorners->data.fl[3*0+0] = target.points[0].x;
			newcorners->data.fl[3*0+1] = target.points[1].x;
			newcorners->data.fl[3*0+2] = target.points[2].x;
			newcorners->data.fl[3*1+0] = target.points[0].y;
			newcorners->data.fl[3*1+1] = target.points[1].y;
			newcorners->data.fl[3*1+2] = target.points[2].y;
			newcorners->data.fl[3*2+0] = 1;
			newcorners->data.fl[3*2+1] = 1;
			newcorners->data.fl[3*2+2] = 1;

			CvContour hdr;
			CvSeqBlock blk;
			CvRect trianglebound = cvBoundingRect( cvPointSeqFromMat(CV_SEQ_KIND_CURVE+CV_SEQ_FLAG_CLOSED, curpoints, &hdr, &blk), 1 );
			printf("Bounding box: %d,%d,%d,%d\n",trianglebound.x,trianglebound.y,trianglebound.width,trianglebound.height);
			for(int y = trianglebound.y; (y < (trianglebound.y + trianglebound.height)) && ( y < image1->height); y++) {
				for(int x = trianglebound.x; (x < (trianglebound.x + trianglebound.width)) && (x < image1->width); x++) {
					// check to see if we're inside this triangle
					/*
					CvPoint v0 = cvPoint( curtri.points[2].x - curtri.points[0].x, curtri.points[2].y - curtri.points[0].y );
					CvPoint v1 = cvPoint( curtri.points[1].x - curtri.points[0].x, curtri.points[1].y - curtri.points[0].y );
					CvPoint v2 = cvPoint( x - curtri.points[0].x, y - curtri.points[0].y );
					
					int dot00 = v0.x * v0.x + v0.y * v0. y;
					int dot01 = v0.x * v1.x + v0.y * v1. y;
					int dot02 = v0.x * v2.x + v0.y * v2. y;
					int dot11 = v1.x * v1.x + v1.y * v1. y;
					int dot12 = v1.x * v2.x + v1.y * v2. y;

					double invDenom = 1.0 / (double)(dot00 * dot11 - dot01 * dot01);
					double u = (double)(dot11 * dot02 - dot01 * dot12) * invDenom;
					double v = (double)(dot00 * dot12 - dot01 * dot02) * invDenom;
					*/

					CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
					CvMat * result = cvCreateMat(3, 1, CV_32FC1);
					curp->data.fl[0] = x;
					curp->data.fl[1] = y;
					curp->data.fl[2] = 1;
					cvMatMul( baryinvvec[i], curp, result );
					// double u = result->data.fl[0]/result->data.fl[2];
					// double v = result->data.fl[1]/result->data.fl[2];

					/*
					if((i == 3019) && (y == 1329) && (x > 2505) && (x < 2584)) {
						printf("Range %d: %f, %f, %f\t%f, %f, %f\n",x,result->data.fl[0],result->data.fl[1],result->data.fl[2],
								sourcepoint->data.fl[0],sourcepoint->data.fl[1],sourcepoint->data.fl[2]);
					}
					*/

					if( (result->data.fl[0] > MIN_VAL) && (result->data.fl[1] > MIN_VAL) && (result->data.fl[2] > MIN_VAL) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					// if((u > 0) || (v > 0) /*&& ((u +v) < 1)*/ )
						// printf("Barycentric: %f %f %f\n", result->data.fl[0], result->data.fl[1], result->data.fl[2]);
						// this point is inside this triangle
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//	trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);
						
						CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
						cvMatMul( newcorners, result, sourcepoint );	
					
						double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
						double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
						if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
							// printf("%d,%d %d,%d\n",x,y,(int)sourcex,(int)sourcey);
							cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
						}
	
						
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

						cvReleaseMat( &sourcepoint );
					}
					cvReleaseMat( &result );
					cvReleaseMat( &curp );
				}
			}
			
			for(int k = 0; k < verts; k++) {
				double x = clean_texture->data.fl[2*k+0];
				double y = clean_texture->data.fl[2*k+1];
				
				// check to see if we're inside this triangle
				CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
				CvMat * result = cvCreateMat(3, 1, CV_32FC1);
				curp->data.fl[0] = x;
				curp->data.fl[1] = y;
				curp->data.fl[2] = 1;
				cvMatMul( baryinvvec[i], curp, result );
			
				if( (result->data.fl[0] > MIN_VAL) && (result->data.fl[1] > MIN_VAL) && (result->data.fl[2] > MIN_VAL) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					
					CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
					cvMatMul( newcorners, result, sourcepoint );	
				
					double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
					double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
					if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
						clean_texture->data.fl[2*k+0] = sourcex;
						clean_texture->data.fl[2*k+1] = sourcey;
						// cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
					}

					cvReleaseMat( &sourcepoint );
				}
				cvReleaseMat( &result );
				cvReleaseMat( &curp );
			}
			cvReleaseMat( &newcorners );
		}
		cvReleaseMat( &curpoints );
	}

	cvReleaseImage( &clean_nonthresh );

	printf("done.\n");

	cvSaveImage("fullwarp.jpg", image1);

	printf("Drawing subdivisions on warped image...");
	draw_subdiv( image1, delaunay, NULL, NULL, 0, NULL );
	// draw_subdiv( image1, delaunay, delaunay_points, source_points, count, status );
	printf("done.\n");
	
	cvSaveImage("edgeswarp.jpg", image1);

	cvReleaseImage(&image2);

	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
	// cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 3 );

	// cvCalcSubdivVoronoi2D( delaunay );
	printf("Drawing subdivisions on unwarped image...");
	// draw_subdiv( image2, delaunay, delaunay_points, dest_points, count, status );
	// draw_subdiv( image2, delaunay, NULL, NULL, 0, NULL );
	printf("done.\n");

	cvSaveImage("edges.jpg",image2);

	cvReleaseImage(&image1);
	cvFree(&source_points);
	cvFree(&dest_points);
	cvFree(&status);
	cvReleaseMemStorage(&storage);
	cvFree(&delaunay_points);

	cvReleaseImage(&image2);

	return 0;
}
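The barycentric machinery above, summarized (a note, not from the original):

	// For triangle vertices P0, P1, P2:
	//   B = [ x0 x1 x2 ; y0 y1 y2 ; 1 1 1 ],  (l0,l1,l2)^T = inv(B) * (x,y,1)^T
	// (x,y) lies inside the triangle iff l0, l1, l2 > 0; l0+l1+l2 == 1 holds by
	// construction, hence the MIN_VAL and sum-near-1 tests in the loops.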
Example #25
void MyMat::Inv(const MyMat& src)
{
	cvInvert(src.m_data, m_data);
}
Example #26
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
CvMat *savgolFilter(CvMat &z, double ra, double rb, double theta){

	int d=2;
	int k=1; // Always these, according to the matlab code (Leo)
	ra = max(1.5,ra);
	rb = max(1.5,rb);
	double ira2 = 1.0/(ra*ra);
	double irb2 = 1.0/(rb*rb);
	double wr = floor(max(ra,rb));
	double wd = 2*wr+1;
	double sint = sin(theta);
	double cost = cos(theta);

	CvMat*xx= cvCreateMat(2*d+1, 1, CV_64FC1);
	cvSetZero(xx);
	CvMat *temp=cvCreateMat(2*d+1, 1, CV_64FC1);

	for (int u=-wr;u<=wr;u++)
		for (int v=-wr;v<=wr;v++) {
			double ai=-u*sint+v*cost; //distance along major axis
			double bi= u*cost+v*sint; //distance along minor axis

			if (ai*ai*ira2+bi*bi*irb2 <= 1) {
				cvSet(temp, cvScalar(ai));
				cvSetReal2D(temp, 0, 0, 1.0);

				double dTemp=1;

				for (int i=0;i<2*d+1-1;i++) {
					dTemp=dTemp*cvGetReal2D(temp,i+1,0);
					cvSetReal2D(temp,i+1,0,dTemp);
				}

				cvAdd(xx,temp,xx);

			}
		}

		cvReleaseMat(&temp);

		CvMat *A=cvCreateMat(d+1,d+1,CV_64FC1);
		for (int i=0;i<d+1;i++)
			for (int j=i;j<=i+d;j++)
				cvSetReal2D(A,j-i,i,cvGetReal2D(xx,j,0));

		cvInvert(A,A,CV_LU);

		CvMat *zz=cvCreateMat(wd,wd, CV_64FC1);
		CvMat *yy=cvCreateMat(d+1,1, CV_64FC1);
		CvMat *result=cvCreateMat(d+1,1, CV_64FC1);
		CvMat *filt=cvCreateMat(wd,wd, CV_32FC1);
		cvSetZero(filt);


		for (int u=-wr;u<=wr;u++)
			for (int v=-wr;v<=wr;v++) {

				cvSetZero(zz);
				cvSetReal2D(zz,v+wr,u+wr,1);
				cvSetZero(yy);

				double ai=-u*sint+v*cost; //distance along major axis
				double bi= u*cost+v*sint; //distance along minor axis

				if (ai*ai*ira2+bi*bi*irb2 <= 1) {
					cvSet(yy, cvScalar(ai));
					cvSetReal2D(yy, 0, 0, 1.0);

					double dTemp=1;
					for (int i=0;i<d+1-1;i++) {
						dTemp=dTemp*cvGetReal2D(yy,i+1,0);
						cvSetReal2D(yy,i+1,0,dTemp);
					}

					cvMatMul(A,yy,result);
					cvSetReal2D(filt,v+wr,u+wr,cvGetReal2D(result,k-1,0));
				}
			}

			cvReleaseMat(&zz);
			cvReleaseMat(&yy);
			cvReleaseMat(&xx);
			cvReleaseMat(&A);
			cvReleaseMat(&result);

			CvMat *ztemp= cvCreateMat(z.rows+filt->rows-1,z.cols+filt->cols-1,CV_32FC1);
			cvCopyMakeBorder(&z,ztemp,cvPoint((filt->cols-1)/2,(filt->rows-1)/2),IPL_BORDER_CONSTANT);
			CvMat *filteredtemp= cvCreateMat(ztemp->rows,ztemp->cols,CV_32FC1);

			cvFilter2D(ztemp,filteredtemp,filt,cvPoint((filt->cols-1)/2,(filt->rows-1)/2));

			// copy the valid sub-rectangle out so the temporaries can be released
			CvMat sub;
			cvGetSubRect(filteredtemp,&sub,cvRect((filt->cols-1)/2,(filt->rows-1)/2,z.cols,z.rows));
			CvMat *filtered = cvCreateMat(z.rows,z.cols,CV_32FC1);
			cvCopy(&sub,filtered);

			cvReleaseMat(&ztemp);
			cvReleaseMat(&filteredtemp);
			cvReleaseMat(&filt);

			return filtered;
}
Example #27
// To be revised later
void FkPaperKeyboard_TypeA::cornerVerification(IplImage* srcImage){
	CvSize size = cvGetSize(srcImage);
	IplImage* eigImage = cvCreateImage(size, IPL_DEPTH_8U,1);
	IplImage* tempImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* grayImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* veriImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* dstImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
	CvRect rect = cvRect(10, 10, 640 - 20, 480 - 20);

	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3, CV_32FC1);
	CvMat* warp_matrix_invert = cvCreateMat(3,3, CV_32FC1);
	CvMat* result = cvCreateMat(3, 1, CV_32FC1);
	CvMat* dst = cvCreateMat(3, 1,CV_32FC1);

	int keyButtonCornerCount = 316;
	
	cvCvtColor(srcImage, grayImage, CV_BGR2GRAY);
	cvSetImageROI(grayImage, rect);
	cvSetImageROI(mask, rect);
	cvSetImageROI(dstImage, rect);
	cvSetImageROI(mask2, rect);

	// Extract only the values in the 150-255 range into the mask (the call below actually uses 100-255)
	cvInRangeS(grayImage, cvScalar(100, 100, 100), cvScalar(255, 255, 255), mask);
	cvCopy(mask, mask2);

	//cvShowImage("mask", mask);
	//cvShowImage("mask2", mask2);

	// 20,20? To drop the values below the threshold, XOR the flood-filled mask (zeros) with the extracted mask2
	cvFloodFill(mask, cvPoint(10, 10), cvScalar(0, 0, 0));
	cvXor(mask2, mask, dstImage);
	
	//cvShowImage("mask3", mask);
	//cvShowImage("mask4", mask2);
	//cvShowImage("dstImage", dstImage);

	// Extract the corners (each key button's corners) from the resulting image
	cvGoodFeaturesToTrack(dstImage, eigImage, tempImage, keyButtonCorner, &keyButtonCornerCount, 0.01, 7, NULL, 7, 0);
	cvFindCornerSubPix (dstImage, keyButtonCorner, keyButtonCornerCount,cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
	
	cvResetImageROI(dstImage);
	for(int i =0 ; i < 316 ; i++){
		keyButtonCorner[i].x += rect.x;
		keyButtonCorner[i].y += rect.y;
	}
	
	initKeyButtonCorner();
	
	srcQuad[CLOCKWISE_1].x = keyButtonCorner[315].x+10;
	srcQuad[CLOCKWISE_1].y = keyButtonCorner[315].y-10;
	srcQuad[CLOCKWISE_5].x = keyButtonCorner[31].x + 10;
	srcQuad[CLOCKWISE_5].y = keyButtonCorner[31].y + 10;
	srcQuad[CLOCKWISE_7].x = keyButtonCorner[0].x - 10;
	srcQuad[CLOCKWISE_7].y = keyButtonCorner[0].y + 10;
	srcQuad[CLOCKWISE_11].x = keyButtonCorner[290].x - 10;
	srcQuad[CLOCKWISE_11].y = keyButtonCorner[290].y - 10;
	dstQuad[CLOCKWISE_1].x = 640;
	dstQuad[CLOCKWISE_1].y = 0;
	dstQuad[CLOCKWISE_5].x = 640;
	dstQuad[CLOCKWISE_5].y = 480;
	dstQuad[CLOCKWISE_7].x = 0;
	dstQuad[CLOCKWISE_7].y = 480;
	dstQuad[CLOCKWISE_11].x = 0;
	dstQuad[CLOCKWISE_11].y = 0;
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
	
	cvWarpPerspective(dstImage, veriImage, warp_matrix);
	detectKeyButtonCorner(veriImage);
	cvInvert(warp_matrix, warp_matrix_invert);
	for(int i = 0 ; i < 316 ; i++){	
		cvmSet(dst, 0, 0, keyButtonCorner[i].x);  
		cvmSet(dst, 1, 0, keyButtonCorner[i].y);
		cvmSet(dst, 2, 0, 1);

		cvMatMul(warp_matrix_invert, dst, result);
		float t = cvmGet(result, 2,0);
		keyButtonCorner[i].x = cvmGet(result, 0,0)/t ;
		keyButtonCorner[i].y = cvmGet(result, 1,0)/t ;
	}
	cvResetImageROI(srcImage);
	cvResetImageROI(mask);
	cvReleaseImage(&eigImage);
	cvReleaseImage(&tempImage);
	cvReleaseImage(&grayImage);
	cvReleaseImage(&veriImage);
	cvReleaseImage(&dstImage);
	cvReleaseImage(&mask);
	cvReleaseImage(&mask2);
	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&warp_matrix_invert);
	cvReleaseMat(&result);
	cvReleaseMat(&dst);	
}
Example #28
void FkPaperKeyboard_TypeA::setKeyButton(IplImage* srcImage){
	// Corner points
	CvPoint2D32f srcQuad[4], dstQuad[4];

	IplImage* perspectiveTransImage = cvCreateImage(cvSize(640,480), IPL_DEPTH_8U, 3);

	CvMat* warp_matrix = cvCreateMat(3,3, CV_32FC1);
	CvMat* warp_matrix_invert = cvCreateMat(3,3, CV_32FC1);
	CvMat* result = cvCreateMat(3, 1, CV_32FC1);
	CvMat* dst = cvCreateMat(3, 1,CV_32FC1);

	sortPaperKeyboardCorner();
	
	srcQuad[CLOCKWISE_1] = keyboardCorner[1];
	srcQuad[CLOCKWISE_5] = keyboardCorner[2];
	srcQuad[CLOCKWISE_7] = keyboardCorner[3];
	srcQuad[CLOCKWISE_11] = keyboardCorner[0];

	dstQuad[CLOCKWISE_1].x = 640;
	dstQuad[CLOCKWISE_1].y = 0;
	dstQuad[CLOCKWISE_5].x = 640;
	dstQuad[CLOCKWISE_5].y = 480;
	dstQuad[CLOCKWISE_7].x = 0;
	dstQuad[CLOCKWISE_7].y = 480;
	dstQuad[CLOCKWISE_11].x = 0;
	dstQuad[CLOCKWISE_11].y = 0;	

	// Compute the perspective transform, then verify on the warped image
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix); // compute the transform
	cvWarpPerspective(srcImage, perspectiveTransImage, warp_matrix);	// warp with the perspective matrix
	cornerVerification(perspectiveTransImage);	// validate the extracted corners
	cvInvert(warp_matrix, warp_matrix_invert);
	
	//cvShowImage("srcImage",srcImage);
	//cvShowImage("perspectiveTransImage", perspectiveTransImage);

	for(int i = 0 ; i < 316 ; i++){
		cvmSet(dst, 0, 0, keyButtonCorner[i].x);  
		cvmSet(dst, 1, 0, keyButtonCorner[i].y);
		cvmSet(dst, 2, 0, 1);

		cvMatMul(warp_matrix_invert, dst, result);
		float t = cvmGet(result, 2,0);
		keyButtonCorner[i].x = cvmGet(result, 0,0)/t;
		keyButtonCorner[i].y = cvmGet(result, 1,0)/t;
	}
	
	setKeyButtonArea(keyButtonCorner, 0, 16);
	setKeyButtonArea(keyButtonCorner, 64, 14);
	setKeyButtonArea(keyButtonCorner, 120, 14);
	setKeyButtonArea(keyButtonCorner, 176, 13);
	setKeyButtonArea(keyButtonCorner, 228, 12);
	setKeyButtonArea(keyButtonCorner, 276, 7);
	setDirectionKeyButtonArea(keyButtonCorner, 306, 2, 77);
	setDirectionKeyButtonArea(keyButtonCorner, 304, 4, 76);
	setDirectionKeyButtonArea(keyButtonCorner, 308, 3, 78);
	
	cvReleaseImage(&perspectiveTransImage);

	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&warp_matrix_invert);
	cvReleaseMat(&result);
	cvReleaseMat(&dst);
}
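A hedged alternative for the two back-projection loops above: cvPerspectiveTransform applies the homography and the divide-by-w in one call (this assumes keyButtonCorner is a CvPoint2D32f array, as its use with cvGoodFeaturesToTrack suggests):

	CvMat pts = cvMat(1, 316, CV_32FC2, keyButtonCorner);
	cvPerspectiveTransform(&pts, &pts, warp_matrix_invert);  // per point: x' = (Hx)/w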
Example #29
// Feature matching
void SiftMatch::on_matchButton_clicked()
{
    // if the user checked the horizontal-layout radio button
    if(ui->radioButton_horizontal->isChecked())
    {
        // combine the two images into one, img1 on the left, img2 on the right
        stacked = stack_imgs_horizontal(img1, img2);// composite showing the matches that survive the distance-ratio test
    }
    else// the user checked the vertical-layout radio button
    {
        verticalStackFlag = true;// set the vertical-layout flag
        // combine the two images into one, img1 on top, img2 on the bottom
        stacked = stack_imgs( img1, img2 );// composite showing the matches that survive the distance-ratio test
    }

    // build a k-d tree from image 1's feature set feat1; the root is returned in kd_root
    kd_root = kdtree_build( feat1, n1 );

    Point pt1,pt2;// the two endpoints of a match line
    double d0,d1;// distances from a feature in feat2 to its nearest and second-nearest neighbors
    int matchNum = 0;// number of match pairs surviving the distance-ratio test

    // for every feature feat in feat2, pick the match that satisfies the distance-ratio
    // condition and store it in feat's fwd_match field
    for(int i = 0; i < n2; i++ )
    {
        feat = feat2+i;// pointer to the i-th feature
        // search kd_root for feat's 2 nearest neighbors; they are stored in nbrs and the
        // number actually found is returned
        int k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
        if( k == 2 )
        {
            d0 = descr_dist_sq( feat, nbrs[0] );// squared distance from feat to its nearest neighbor
            d1 = descr_dist_sq( feat, nbrs[1] );// squared distance from feat to its second-nearest neighbor
            // accept the match if d0/d1 is below the threshold NN_SQ_DIST_RATIO_THR, otherwise reject it
            if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
            {   // take feat and its nearest neighbor as a match pair
                pt2 = Point( cvRound( feat->x ), cvRound( feat->y ) );// point in image 2
                pt1 = Point( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );// point in image 1 (feat's nearest neighbor)
                if(verticalStackFlag)// vertical layout
                    pt2.y += img1->height;// the images are stacked vertically, so offset pt2.y by img1's height to get the line's endpoint
                else
                    pt2.x += img1->width;// the images are side by side, so offset pt2.x by img1's width to get the line's endpoint
                cvLine( stacked, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 );// draw the match line
                matchNum++;// count the match pair
                feat2[i].fwd_match = nbrs[0];// point feat's fwd_match at its matching feature
            }
        }
        free( nbrs );// free the neighbor array
    }
    qDebug()<<tr("Match pairs after distance-ratio filtering:")<<matchNum<<endl;
    // display and save the match image produced by the distance-ratio test

    //cvNamedWindow(IMG_MATCH1);// create a window
    //cvShowImage(IMG_MATCH1,stacked);// display

    // save the match image
    QString name_match_DistRatio = name1;// file name: the original name with the index removed, plus "_match_DistRatio"
    cvSaveImage(name_match_DistRatio.replace( name_match_DistRatio.lastIndexOf(".",-1)-1 , 1 , "_match_DistRatio").toAscii().data(),stacked);


    // use RANSAC to filter the matches and compute the transform matrix H;
    // regardless of the left/right order of img1 and img2, H always maps features in feat2
    // to their matches, i.e. points in img2 to the corresponding points in img1
    H = ransac_xform(feat2,n2,FEATURE_FWD_MATCH,lsq_homog,4,0.01,homog_xfer_err,3.0,&inliers,&n_inliers);

    // if the transform was computed successfully, the two images share a common region
    if( H )
    {
        qDebug()<<tr("Match pairs after RANSAC filtering:")<<n_inliers<<endl;

//        // print the H matrix
//        for(int i=0;i<3;i++)
//            qDebug()<<cvmGet(H,i,0)<<cvmGet(H,i,1)<<cvmGet(H,i,2);

        if(verticalStackFlag)// combine the two images into one, img1 on top, img2 on the bottom
            stacked_ransac = stack_imgs( img1, img2 );// composite showing the RANSAC-filtered matches
        else// combine the two images into one, img1 on the left, img2 on the right
            stacked_ransac = stack_imgs_horizontal(img1, img2);// composite showing the RANSAC-filtered matches

        //img1LeftBound = inliers[0]->fwd_match->x;// left bound of the matched points' bounding box in image 1
        //img1RightBound = img1LeftBound;// right bound of the matched points' bounding box in image 1
        //img2LeftBound = inliers[0]->x;// left bound of the matched points' bounding box in image 2
        //img2RightBound = img2LeftBound;// right bound of the matched points' bounding box in image 2

        int invertNum = 0;// count the match pairs with pt2.x > pt1.x, to decide whether img1 is the right-hand image

        // iterate over the RANSAC inliers, find each feature's match, and draw the connecting line
        for(int i=0; i<n_inliers; i++)
        {
            feat = inliers[i];// the i-th feature
            pt2 = Point(cvRound(feat->x), cvRound(feat->y));// point in image 2
            pt1 = Point(cvRound(feat->fwd_match->x), cvRound(feat->fwd_match->y));// point in image 1 (feat's match)
            //qDebug()<<"pt2:("<<pt2.x<<","<<pt2.y<<")--->pt1:("<<pt1.x<<","<<pt1.y<<")";// print the corresponding pair

            /* find the bounds of the matched region
            if(pt1.x < img1LeftBound) img1LeftBound = pt1.x;
            if(pt1.x > img1RightBound) img1RightBound = pt1.x;
            if(pt2.x < img2LeftBound) img2LeftBound = pt2.x;
            if(pt2.x > img2RightBound) img2RightBound = pt2.x;//*/

            // track the left/right relation of the matched points to infer the left/right order of the two images
            if(pt2.x > pt1.x)
                invertNum++;

            if(verticalStackFlag)// vertical layout
                pt2.y += img1->height;// the images are stacked vertically, so offset pt2.y by img1's height to get the line's endpoint
            else// horizontal layout
                pt2.x += img1->width;// the images are side by side, so offset pt2.x by img1's width to get the line's endpoint
            cvLine(stacked_ransac,pt1,pt2,CV_RGB(255,0,255),1,8,0);// draw the line on the match image
        }

        // draw the rectangle bounding the matched points in image 1
        //cvRectangle(stacked_ransac,cvPoint(img1LeftBound,0),cvPoint(img1RightBound,img1->height),CV_RGB(0,255,0),2);
        // draw the rectangle bounding the matched points in image 2
        //cvRectangle(stacked_ransac,cvPoint(img1->width+img2LeftBound,0),cvPoint(img1->width+img2RightBound,img2->height),CV_RGB(0,0,255),2);

        //cvNamedWindow(IMG_MATCH2);// create a window
        //cvShowImage(IMG_MATCH2,stacked_ransac);// show the RANSAC-filtered match image

        // save the match image
        QString name_match_RANSAC = name1;// file name: the original name with the index removed, plus "_match_RANSAC"
        cvSaveImage(name_match_RANSAC.replace( name_match_RANSAC.lastIndexOf(".",-1)-1 , 1 , "_match_RANSAC").toAscii().data(),stacked_ransac);


        /* The transform H computed by the program maps points in img2 to points in img1;
           normally img1 is the left image and img2 the right one. In that case the x
           coordinates of a point pt2 in img2 and its counterpart pt1 in img1 mostly satisfy
           pt2.x < pt1.x. If the user opened img1 as the right image and img2 as the left one,
           the relation is mostly pt2.x > pt1.x. So by counting the x-order of corresponding
           points we can tell whether img1 is the right image. If it is, transforming img1's
           match points by H's inverse H_IVT yields the match points in img2. */

        // if more than 80% of the inliers have pt2.x > pt1.x, treat img1 as the right image
        if(invertNum > n_inliers * 0.8)
        {
            qDebug()<<tr("img1 is the right-hand image");
            CvMat * H_IVT = cvCreateMat(3, 3, CV_64FC1);// inverse of the transform matrix
            // cvInvert returns a non-zero value if the inverse H_IVT was computed successfully
            if( cvInvert(H,H_IVT) )
            {
//                // print H_IVT
//                for(int i=0;i<3;i++)
//                    qDebug()<<cvmGet(H_IVT,i,0)<<cvmGet(H_IVT,i,1)<<cvmGet(H_IVT,i,2);
                cvReleaseMat(&H);// release H, it is no longer needed
                H = cvCloneMat(H_IVT);// copy the data of the inverse H_IVT into H
                cvReleaseMat(&H_IVT);// release H_IVT
                // swap img1 and img2
                IplImage * temp = img2;
                img2 = img1;
                img1 = temp;
                //cvShowImage(IMG1,img1);
                //cvShowImage(IMG2,img2);
                ui->mosaicButton->setEnabled(true);// enable the panorama-stitching button
            }
            else// cvInvert returns 0 when H is not invertible
            {
                cvReleaseMat(&H_IVT);// release H_IVT
                QMessageBox::warning(this,tr("Warning"),tr("Transform matrix H is not invertible"));
            }
        }
        else
            ui->mosaicButton->setEnabled(true);// enable the panorama-stitching button
    }
    else // the transform could not be computed: the two images have no overlapping region
    {
        QMessageBox::warning(this,tr("Warning"),tr("No common region between the two images"));
    }

    ui->radioButton_horizontal->setEnabled(false);// disable the layout radio buttons
    ui->radioButton_vertical->setEnabled(false);
    ui->matchButton->setEnabled(false);// disable the feature-match button
}
Example #30
CV_IMPL void
cvFindCornerSubPix( const void* srcarr, CvPoint2D32f* corners,
                    int count, CvSize win, CvSize zeroZone,
                    CvTermCriteria criteria )
{
    cv::AutoBuffer<float> buffer;
    
    const int MAX_ITERS = 100;
    const float drv_x[] = { -1.f, 0.f, 1.f };
    const float drv_y[] = { 0.f, 0.5f, 0.f };
    float *maskX;
    float *maskY;
    float *mask;
    float *src_buffer;
    float *gx_buffer;
    float *gy_buffer;
    int win_w = win.width * 2 + 1, win_h = win.height * 2 + 1;
    int win_rect_size = (win_w + 4) * (win_h + 4);
    double coeff;
    CvSize size, src_buf_size;
    int i, j, k, pt_i;
    int max_iters = 10;
    double eps = 0;

    CvMat stub, *src = (CvMat*)srcarr;
    src = cvGetMat( srcarr, &stub );

    if( CV_MAT_TYPE( src->type ) != CV_8UC1 )
        CV_Error( CV_StsBadMask, "" );

    if( !corners )
        CV_Error( CV_StsNullPtr, "" );

    if( count < 0 )
        CV_Error( CV_StsBadSize, "" );

    if( count == 0 )
        return;

    if( win.width <= 0 || win.height <= 0 )
        CV_Error( CV_StsBadSize, "" );

    size = cvGetMatSize( src );

    if( size.width < win_w + 4 || size.height < win_h + 4 )
        CV_Error( CV_StsBadSize, "" );

    /* initialize variables, controlling loop termination */
    switch( criteria.type )
    {
    case CV_TERMCRIT_ITER:
        eps = 0.f;
        max_iters = criteria.max_iter;
        break;
    case CV_TERMCRIT_EPS:
        eps = criteria.epsilon;
        max_iters = MAX_ITERS;
        break;
    case CV_TERMCRIT_ITER | CV_TERMCRIT_EPS:
        eps = criteria.epsilon;
        max_iters = criteria.max_iter;
        break;
    default:
        assert( 0 );
        CV_Error( CV_StsBadFlag, "" );
    }

    eps = MAX( eps, 0 );
    eps *= eps;                 /* use square of error in comparison operations. */

    max_iters = MAX( max_iters, 1 );
    max_iters = MIN( max_iters, MAX_ITERS );

    buffer.allocate( win_rect_size * 5 + win_w + win_h + 32 );

    /* assign pointers */
    maskX = buffer;
    maskY = maskX + win_w + 4;
    mask = maskY + win_h + 4;
    src_buffer = mask + win_w * win_h;
    gx_buffer = src_buffer + win_rect_size;
    gy_buffer = gx_buffer + win_rect_size;

    coeff = 1. / (win.width * win.width);

    /* calculate mask */
    for( i = -win.width, k = 0; i <= win.width; i++, k++ )
    {
        maskX[k] = (float)exp( -i * i * coeff );
    }

    if( win.width == win.height )
    {
        maskY = maskX;
    }
    else
    {
        coeff = 1. / (win.height * win.height);
        for( i = -win.height, k = 0; i <= win.height; i++, k++ )
        {
            maskY[k] = (float) exp( -i * i * coeff );
        }
    }

    for( i = 0; i < win_h; i++ )
    {
        for( j = 0; j < win_w; j++ )
        {
            mask[i * win_w + j] = maskX[j] * maskY[i];
        }
    }


    /* make zero_zone */
    if( zeroZone.width >= 0 && zeroZone.height >= 0 &&
        zeroZone.width * 2 + 1 < win_w && zeroZone.height * 2 + 1 < win_h )
    {
        for( i = win.height - zeroZone.height; i <= win.height + zeroZone.height; i++ )
        {
            for( j = win.width - zeroZone.width; j <= win.width + zeroZone.width; j++ )
            {
                mask[i * win_w + j] = 0;
            }
        }
    }

    /* set sizes of image rectangles, used in convolutions */
    src_buf_size.width = win_w + 2;
    src_buf_size.height = win_h + 2;

    /* do optimization loop for all the points */
    for( pt_i = 0; pt_i < count; pt_i++ )
    {
        CvPoint2D32f cT = corners[pt_i], cI = cT;
        int iter = 0;
        double err;

        do
        {
            CvPoint2D32f cI2;
            double a, b, c, bb1, bb2;

            IPPI_CALL( icvGetRectSubPix_8u32f_C1R( (uchar*)src->data.ptr, src->step, size,
                                        src_buffer, (win_w + 2) * sizeof( src_buffer[0] ),
                                        cvSize( win_w + 2, win_h + 2 ), cI ));

            /* calc derivatives */
            icvSepConvSmall3_32f( src_buffer, src_buf_size.width * sizeof(src_buffer[0]),
                                  gx_buffer, win_w * sizeof(gx_buffer[0]),
                                  src_buf_size, drv_x, drv_y, buffer );

            icvSepConvSmall3_32f( src_buffer, src_buf_size.width * sizeof(src_buffer[0]),
                                  gy_buffer, win_w * sizeof(gy_buffer[0]),
                                  src_buf_size, drv_y, drv_x, buffer );

            a = b = c = bb1 = bb2 = 0;

            /* process gradient */
            for( i = 0, k = 0; i < win_h; i++ )
            {
                double py = i - win.height;

                for( j = 0; j < win_w; j++, k++ )
                {
                    double m = mask[k];
                    double tgx = gx_buffer[k];
                    double tgy = gy_buffer[k];
                    double gxx = tgx * tgx * m;
                    double gxy = tgx * tgy * m;
                    double gyy = tgy * tgy * m;
                    double px = j - win.width;

                    a += gxx;
                    b += gxy;
                    c += gyy;

                    bb1 += gxx * px + gxy * py;
                    bb2 += gxy * px + gyy * py;
                }
            }

            {
                double A[4];
                double InvA[4];
                CvMat matA, matInvA;

                A[0] = a;
                A[1] = A[2] = b;
                A[3] = c;

                cvInitMatHeader( &matA, 2, 2, CV_64F, A );
                cvInitMatHeader( &matInvA, 2, 2, CV_64FC1, InvA );

                cvInvert( &matA, &matInvA, CV_SVD );
                cI2.x = (float)(cI.x + InvA[0]*bb1 + InvA[1]*bb2);
                cI2.y = (float)(cI.y + InvA[2]*bb1 + InvA[3]*bb2);
            }

            err = (cI2.x - cI.x) * (cI2.x - cI.x) + (cI2.y - cI.y) * (cI2.y - cI.y);
            cI = cI2;
        }
        while( ++iter < max_iters && err > eps );

        /* if new point is too far from initial, it means poor convergence.
           leave initial point as the result */
        if( fabs( cI.x - cT.x ) > win.width || fabs( cI.y - cT.y ) > win.height )
        {
            cI = cT;
        }

        corners[pt_i] = cI;     /* store result */
    }
}
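A typical call, as a hedged sketch (gray, corners, and cornerCount are hypothetical): refine corner locations found by cvGoodFeaturesToTrack to sub-pixel accuracy.

	cvFindCornerSubPix(gray, corners, cornerCount,
	                   cvSize(5, 5),    // half-size of the search window -> an 11x11 window
	                   cvSize(-1, -1),  // no dead zone in the middle
	                   cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 30, 0.01));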