/* Kalman filter measurement-update ("correct") step.
 * Computes the gain K(k), the posterior state x(k) and the posterior error
 * covariance P(k) from the prediction made by cvKalmanPredict and the new
 * measurement z(k).  Returns kalman->state_post.
 * Raises CV_StsNullPtr if either argument is NULL. */
CV_IMPL const CvMat* cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
{
    if( !kalman || !measurement )
        CV_Error( CV_StsNullPtr, "" );

    /* temp2 = H*P'(k) */
    cvMatMulAdd( kalman->measurement_matrix, kalman->error_cov_pre, 0, kalman->temp2 );

    /* temp3 = temp2*Ht + R  (the innovation covariance) */
    cvGEMM( kalman->temp2, kalman->measurement_matrix, 1,
            kalman->measurement_noise_cov, 1, kalman->temp3, CV_GEMM_B_T );

    /* temp4 = inv(temp3)*temp2 = Kt(k)
       (SVD-based solve tolerates a near-singular innovation covariance) */
    cvSolve( kalman->temp3, kalman->temp2, kalman->temp4, CV_SVD );

    /* K(k) */
    cvTranspose( kalman->temp4, kalman->gain );

    /* temp5 = z(k) - H*x'(k)  (the innovation/residual) */
    cvGEMM( kalman->measurement_matrix, kalman->state_pre, -1,
            measurement, 1, kalman->temp5 );

    /* x(k) = x'(k) + K(k)*temp5 */
    cvMatMulAdd( kalman->gain, kalman->temp5, kalman->state_pre, kalman->state_post );

    /* P(k) = P'(k) - K(k)*temp2 */
    cvGEMM( kalman->gain, kalman->temp2, -1, kalman->error_cov_pre, 1,
            kalman->error_cov_post, 0 );

    return kalman->state_post;
}
// y := alpha * A * X + beta * y inline void __dgemv( char trans, int m, int n, double alpha, double *A, // n * m int lda, double *X, // m('T') int incx, double beta, double *y, // n('T') int incy ) { assert(incx==1 && incy==1); if(trans=='T') { CvMat A_mat= cvMat(n, m, CV_64FC1, A); CvMat X_mat= cvMat(m, 1, CV_64FC1, X); CvMat y_mat= cvMat(n, 1, CV_64FC1, y); cvGEMM(&A_mat, &X_mat, alpha, &y_mat, beta, &y_mat, 0); } else if(trans=='N') { CvMat A_mat= cvMat(n, m, CV_64FC1, A); CvMat X_mat= cvMat(n, 1, CV_64FC1, X); CvMat y_mat= cvMat(m, 1, CV_64FC1, y); cvGEMM(&A_mat, &X_mat, alpha, &y_mat, beta, &y_mat, CV_GEMM_A_T); } else { printf("error in function __dgemv"); exit(-1); } }
/* Kalman filter time-update ("predict") step.
 * Propagates the posterior state/covariance through the motion model:
 *   x'(k) = A*x(k) [+ B*u(k)],  P'(k) = A*P(k)*At + Q.
 * control may be NULL when the model has no control input.
 * Returns kalman->state_pre; raises CV_StsNullPtr if kalman is NULL. */
CV_IMPL const CvMat* cvKalmanPredict( CvKalman* kalman, const CvMat* control )
{
    if( !kalman )
        CV_Error( CV_StsNullPtr, "" );

    /* update the state */
    /* x'(k) = A*x(k) */
    cvMatMulAdd( kalman->transition_matrix, kalman->state_post, 0, kalman->state_pre );

    if( control && kalman->CP > 0 )
        /* x'(k) = x'(k) + B*u(k) */
        cvMatMulAdd( kalman->control_matrix, control, kalman->state_pre, kalman->state_pre );

    /* update error covariance matrices */
    /* temp1 = A*P(k) */
    cvMatMulAdd( kalman->transition_matrix, kalman->error_cov_post, 0, kalman->temp1 );

    /* P'(k) = temp1*At + Q */
    cvGEMM( kalman->temp1, kalman->transition_matrix, 1, kalman->process_noise_cov, 1,
            kalman->error_cov_pre, CV_GEMM_B_T );

    /* handle the case when there will be measurement before the next predict */
    cvCopy(kalman->state_pre, kalman->state_post);

    return kalman->state_pre;
}
//! Performs one step of extremum interpolation. void interpolateStep(int r, int c, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b, double* xi, double* xr, double* xc ) //void interpolateStep() { CvMat* dD, * H, * H_inv, X; double x[3] = { 0 }; dD = deriv3D( r, c, t, m, b ); H = hessian3D( r, c, t, m, b ); H_inv = CreateMat( 3, 3, CV_64FC1 ); cvInvert( H, H_inv, CV_SVD ); // incomplete check after invert() => CreateSVD() //cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP ); InitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP ); //check cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 ); //incomplete free(&dD); free(&H); free(&H_inv); //cvReleaseMat( &dD ); //cvReleaseMat( &H ); //cvReleaseMat( &H_inv ); *xi = x[2]; *xr = x[1]; *xc = x[0]; }
//============================================================================ void AAM_Basic::CalcGradientMatrix(const CvMat* CParams, const CvMat* vCDisps, const CvMat* vPoseDisps, const std::vector<AAM_Shape>& AllShapes, const std::vector<IplImage*>& AllImages) { int npixels = __cam.__texture.nPixels(); int np = __cam.nModes(); // do model parameter experiments { printf("Calculating parameter gradient matrix...\n"); CvMat* GParam = cvCreateMat(np, npixels, CV_64FC1);cvZero(GParam); CvMat* GtG = cvCreateMat(np, np, CV_64FC1); CvMat* GtGInv = cvCreateMat(np, np, CV_64FC1); // estimate Rc EstCParamGradientMatrix(GParam, CParams, AllShapes, AllImages, vCDisps); __Rc = cvCreateMat(np, npixels, CV_64FC1); cvGEMM(GParam, GParam, 1, NULL, 0, GtG, CV_GEMM_B_T); cvInvert(GtG, GtGInv, CV_SVD ); cvMatMul(GtGInv, GParam, __Rc); cvReleaseMat(&GtG); cvReleaseMat(&GtGInv); cvReleaseMat(&GParam); } // do pose experiments, this is for global shape normalization { printf("Calculating pose gradient matrix...\n"); CvMat* GtG = cvCreateMat(4, 4, CV_64FC1); CvMat* GtGInv = cvCreateMat(4, 4, CV_64FC1); CvMat* GPose = cvCreateMat(4, npixels, CV_64FC1); cvZero(GPose); // estimate Rt EstPoseGradientMatrix(GPose, CParams, AllShapes, AllImages, vPoseDisps); __Rq = cvCreateMat(4, npixels, CV_64FC1); cvGEMM(GPose, GPose, 1, NULL, 0, GtG, CV_GEMM_B_T); cvInvert(GtG, GtGInv, CV_SVD); cvMatMul(GtGInv, GPose, __Rq); cvReleaseMat(&GtG); cvReleaseMat(&GtGInv); cvReleaseMat(&GPose); } }
/* Computes the canvas size needed to stitch im2 (warped by the homography
 * Homo_Mat) onto im1.  The warped x/y extents of im2's corners are returned
 * through XDATA[0..1] (min/max x) and YDATA[0..1] (min/max y). */
CvSize get_Stitched_Size(CvSize im1_size, CvSize im2_size, CvMat* Homo_Mat, double XDATA[], double YDATA[] )/*{{{*/
{
    int Width = 0;
    int Height = 0;

    /* homogeneous coordinates of im2's four corners */
    double p1[3] = { 0, 0, 1 };
    double p2[3] = { im2_size.width-1 , 0, 1 };
    double p3[3] = { 0, im2_size.height-1, 1 };
    double p4[3] = { im2_size.width-1, im2_size.height-1, 1 };
    /* Bug fix: cvGEMM was previously called with the same vector as both
       source and destination; general matrix multiply does not support
       in-place operation, so write the products into separate vectors. */
    double q1[3], q2[3], q3[3], q4[3];

    CvMat pp1 = cvMat( 3, 1, CV_64FC1, p1 );
    CvMat pp2 = cvMat( 3, 1, CV_64FC1, p2 );
    CvMat pp3 = cvMat( 3, 1, CV_64FC1, p3 );
    CvMat pp4 = cvMat( 3, 1, CV_64FC1, p4 );
    CvMat qq1 = cvMat( 3, 1, CV_64FC1, q1 );
    CvMat qq2 = cvMat( 3, 1, CV_64FC1, q2 );
    CvMat qq3 = cvMat( 3, 1, CV_64FC1, q3 );
    CvMat qq4 = cvMat( 3, 1, CV_64FC1, q4 );

    cvGEMM( Homo_Mat, &pp1, 1, NULL, 0, &qq1, 0 );
    cvGEMM( Homo_Mat, &pp2, 1, NULL, 0, &qq2, 0 );
    cvGEMM( Homo_Mat, &pp3, 1, NULL, 0, &qq3, 0 );
    cvGEMM( Homo_Mat, &pp4, 1, NULL, 0, &qq4, 0 );

    /* Normalization qq ---> (x y 1) */
    double L1 = cvmGet( &qq1, 2, 0 );
    double L2 = cvmGet( &qq2, 2, 0 );
    double L3 = cvmGet( &qq3, 2, 0 );
    double L4 = cvmGet( &qq4, 2, 0 );

    XDATA[0] = min( min( min( cvmGet(&qq1,0,0)/L1, cvmGet(&qq2,0,0)/L2 ), cvmGet(&qq3,0,0)/L3 ), cvmGet(&qq4,0,0)/L4 );
    YDATA[0] = min( min( min( cvmGet(&qq1,1,0)/L1, cvmGet(&qq2,1,0)/L2 ), cvmGet(&qq3,1,0)/L3 ), cvmGet(&qq4,1,0)/L4 );
    XDATA[1] = max( max( max( cvmGet(&qq1,0,0)/L1, cvmGet(&qq2,0,0)/L2 ), cvmGet(&qq3,0,0)/L3 ), cvmGet(&qq4,0,0)/L4 );
    YDATA[1] = max( max( max( cvmGet(&qq1,1,0)/L1, cvmGet(&qq2,1,0)/L2 ), cvmGet(&qq3,1,0)/L3 ), cvmGet(&qq4,1,0)/L4 );

    /* canvas must cover im1 plus the warped extent of im2 */
    Width = max( max( max( im1_size.width, XDATA[1] ), im1_size.width-XDATA[0] ), XDATA[1]-XDATA[0] ) ;
    Height = max( max( max( im1_size.height, YDATA[1] ), im1_size.height-YDATA[0] ), YDATA[1]-YDATA[0] ) ;

    Width = cvRound( Width );
    printf("New image's Width is %d\n", Width );
    Height = cvRound( Height );
    printf("New image's Height is %d\n", Height );

    return cvSize( Width, Height );
}/*}}}*/
/* Visualizes the BazAR tracking result: copies the current video frame into
 * *dst (allocating it on first use), converts BazAR's projection matrix to
 * the ARToolKit convention, and -- in debug mode -- draws the projected
 * coordinate-system axes onto *dst. */
void BazARTracker::show_result(CamAugmentation &augment, IplImage *video, IplImage **dst)
{
    if (getDebugMode()){
        if (*dst==0) *dst=cvCloneImage(video);   /* lazily allocate the debug image */
        else cvCopy(video, *dst);
    }

    CvMat *m = augment.GetProjectionMatrix(0);
    // Flip...(This occured from OpenGL origin / camera origin)
    CvMat *coordinateTrans = cvCreateMat(3, 3, CV_64F);
    cvmSetIdentity(coordinateTrans);
    cvmSet(coordinateTrans, 1, 1, -1);
    cvmSet(coordinateTrans, 1, 2, m_cparam->cparam.ysize);
    // NOTE(review): in-place product (m is both operand and destination) --
    // general matrix multiply is normally not in-place safe; verify cvMatMul
    // behaves as intended here.
    cvMatMul(coordinateTrans, m, m);

    // extract intrinsic camera parameters from bazar's projection matrix..
    GetARToolKitRTfromBAZARProjMat(g_matIntrinsic, m, matCameraRT4_4);
    cvTranspose(matCameraRT4_4, matCameraRT4_4);
    cvReleaseMat(&coordinateTrans);

    // Debug
    if (getDebugMode()) {
        // draw the coordinate system axes
        double w =video->width/2.0;
        double h =video->height/2.0;

        // 3D coordinates of an object
        double pts[4][4] = {
            {w,h,0, 1}, // 0,0,0,1
            {w*2,h,0, 1}, // w, 0
            {w,h*2,0, 1}, // 0, h
            {w,h,-w-h, 1} // 0, 0, -
        };

        CvMat ptsMat, projectedMat;
        cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
        // 'projected' is presumably a 3x4 double member/global -- confirm.
        cvInitMatHeader(&projectedMat, 3, 4, CV_64FC1, projected);
        // projected = m * pts^T  (each column is one projected point)
        cvGEMM(m, &ptsMat, 1, 0, 0, &projectedMat, CV_GEMM_B_T );
        // de-homogenise each projected point
        for (int i=0; i<4; i++) {
            projected[0][i] /= projected[2][i];
            projected[1][i] /= projected[2][i];
        }
        // draw the projected lines: x axis red, y axis green, z axis blue
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
               cvPoint((int)projected[0][1], (int)projected[1][1]), CV_RGB(255,0,0), 2);
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
               cvPoint((int)projected[0][2], (int)projected[1][2]), CV_RGB(0,255,0), 2);
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
               cvPoint((int)projected[0][3], (int)projected[1][3]), CV_RGB(0,0,255), 2);
    }
}
//计算图2的四个角经矩阵H变换后的坐标 void SiftMatch::CalcFourCorner() { //计算图2的四个角经矩阵H变换后的坐标 double v2[]={0,0,1};//左上角 double v1[3];//变换后的坐标值 CvMat V2 = cvMat(3,1,CV_64FC1,v2); CvMat V1 = cvMat(3,1,CV_64FC1,v1); cvGEMM(H,&V2,1,0,1,&V1);//矩阵乘法 leftTop.x = cvRound(v1[0]/v1[2]); leftTop.y = cvRound(v1[1]/v1[2]); //cvCircle(xformed,leftTop,7,CV_RGB(255,0,0),2); //将v2中数据设为左下角坐标 v2[0] = 0; v2[1] = img2->height; V2 = cvMat(3,1,CV_64FC1,v2); V1 = cvMat(3,1,CV_64FC1,v1); cvGEMM(H,&V2,1,0,1,&V1); leftBottom.x = cvRound(v1[0]/v1[2]); leftBottom.y = cvRound(v1[1]/v1[2]); //cvCircle(xformed,leftBottom,7,CV_RGB(255,0,0),2); //将v2中数据设为右上角坐标 v2[0] = img2->width; v2[1] = 0; V2 = cvMat(3,1,CV_64FC1,v2); V1 = cvMat(3,1,CV_64FC1,v1); cvGEMM(H,&V2,1,0,1,&V1); rightTop.x = cvRound(v1[0]/v1[2]); rightTop.y = cvRound(v1[1]/v1[2]); //cvCircle(xformed,rightTop,7,CV_RGB(255,0,0),2); //将v2中数据设为右下角坐标 v2[0] = img2->width; v2[1] = img2->height; V2 = cvMat(3,1,CV_64FC1,v2); V1 = cvMat(3,1,CV_64FC1,v1); cvGEMM(H,&V2,1,0,1,&V1); rightBottom.x = cvRound(v1[0]/v1[2]); rightBottom.y = cvRound(v1[1]/v1[2]); //cvCircle(xformed,rightBottom,7,CV_RGB(255,0,0),2); }
// Maps the averaged pupil center through the calibration homography and
// stores the resulting (de-homogenised, rounded) scene position.
void EyeTracker::calculateScenePoint()
{
    // Fill the homogeneous pupil-center vector.
    double* center = centerMatrix->data.db;
    center[0] = aver_center.x;
    center[1] = aver_center.y;
    center[2] = 1;

    // hedefp = homography * center
    cvGEMM(homography, centerMatrix, 1, 0 , 0, hedefp);

    // Perspective divide and round to integer pixel coordinates.
    const double* mapped = hedefp->data.db;
    scenePoint = cvPoint(cvRound(mapped[0] / mapped[2]),
                         cvRound(mapped[1] / mapped[2]));
}
void MT_CVQuadraticMul(const CvMat* X, const CvMat* W, CvMat* dst, bool transpose_X, CvMat* tmp_prod) { bool own_prod = (tmp_prod == NULL); if(own_prod) { tmp_prod = cvCreateMat(W->rows, X->cols, cvGetElemType(X)); } cvGEMM(W, X, 1.0, NULL, 1.0, tmp_prod, transpose_X ? CV_GEMM_B_T : 0); cvGEMM(X, tmp_prod, 1.0, NULL, 1.0, dst, transpose_X ? 0 : CV_GEMM_A_T); if(own_prod) { cvReleaseMat(&tmp_prod); } }
/* Solves the quadratically-constrained l1-minimization (L1QC) problem
 *   min ||x||_1  s.t.  ||Ax - b||_2 <= epsilon
 * via a log-barrier method whose barrier weight grows by factor mu each
 * outer iteration; inner Newton steps are taken by icvL1QCNewton and linear
 * systems are solved by conjugate gradients (cvCGSolve).
 * Returns 0 on success, -1 if the initial CG solve fails to converge. */
int cvL1QCSolve( CvMat* A, CvMat* B, CvMat* X, double epsilon, double mu,
                 CvTermCriteria lb_term_crit, CvTermCriteria cg_term_crit )
{
    /* Initial point: minimum-energy solution X = A'(AA')^-1 B */
    CvMat* AAt = cvCreateMat( A->rows, A->rows, CV_MAT_TYPE(A->type) );
    cvGEMM( A, A, 1, NULL, 0, AAt, CV_GEMM_B_T );
    CvMat* W = cvCreateMat( A->rows, 1, CV_MAT_TYPE(X->type) );
    if ( cvCGSolve( AAt, B, W, cg_term_crit ) > .5 )
    {
        /* conjugate gradients did not converge -- bail out */
        cvReleaseMat( &W );
        cvReleaseMat( &AAt );
        return -1;
    }
    cvGEMM( A, W, 1, NULL, 0, X, CV_GEMM_A_T );
    cvReleaseMat( &W );
    cvReleaseMat( &AAt );

    /* Slack bounds U, initialised slightly above |X| */
    CvMat* U = cvCreateMat( X->rows, X->cols, CV_MAT_TYPE(X->type) );
    cvAbsDiffS( X, U, cvScalar(0) );          /* U = |X| */
    CvScalar sumAbsX = cvSum( U );
    double minAbsX, maxAbsX;
    cvMinMaxLoc( U, &minAbsX, &maxAbsX );
    cvConvertScale( U, U, .95, maxAbsX * .1 );  /* U = .95|X| + .1 max|X| */

    /* initial barrier weight */
    double tau = MAX( (2 * X->rows + 1) / sumAbsX.val[0], 1 );

    /* choose the number of outer iterations needed to shrink the duality
       gap below lb_term_crit.epsilon, unless the caller fixed it */
    if ( !(lb_term_crit.type & CV_TERMCRIT_ITER) )
        lb_term_crit.max_iter = ceil( (log(2 * X->rows + 1) - log(lb_term_crit.epsilon) - log(tau)) / log(mu) );
    CvTermCriteria nt_term_crit = cvTermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 50, lb_term_crit.epsilon );

    /* outer log-barrier loop */
    for ( int i = 0; i < lb_term_crit.max_iter; ++i )
    {
        icvL1QCNewton( A, B, X, U, epsilon, tau, nt_term_crit, cg_term_crit );
        tau *= mu;   /* tighten the barrier */
    }
    cvReleaseMat( &U );
    return 0;
}
/* Calculates interpolated pixel contrast. Based on Eqn. (3) in Lowe's paper. @param dog_pyr difference of Gaussians scale space pyramid @param octv octave of scale space @param intvl within-octave interval @param r pixel row @param c pixel column @param xi interpolated subpixel increment to interval @param xr interpolated subpixel increment to row @param xc interpolated subpixel increment to col @param Returns interpolated contrast. */ double interp_contr( IplImage*** dog_pyr, int octv, int intvl, int r, int c, double xi, double xr, double xc ) { CvMat* dD, X, T; double t[1], x[3] = { xc, xr, xi }; cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP ); cvInitMatHeader( &T, 1, 1, CV_64FC1, t, CV_AUTOSTEP ); dD = deriv_3D( dog_pyr, octv, intvl, r, c ); cvGEMM( dD, &X, 1, NULL, 0, &T, CV_GEMM_A_T ); cvReleaseMat( &dD ); return pixval32f( dog_pyr[octv][intvl], r, c ) + t[0] * 0.5; }
void ConformalResizing::Constrian(const ConstrainUnits& unit, CvMat*& M) { // Preprocess unit to make Matrix M less singular double meanX(0), meanY(0); for (int i = 0; i < unit.n; i++) { meanX += unit.pnts[i].x; meanY += unit.pnts[i].y; } meanX /= unit.n; meanY /= unit.n; int n = unit.n * 2; M = cvCreateMat(n, n, CV_64F); CvMat* A = cvCreateMat(n, 4, CV_64F); CvMat* Q = cvCreateMat(n, 4, CV_64F); CvMat* P = cvCreateMat(4, 4, CV_64F); // Initial A cvZero(A); for (int i = 0; i < unit.n; i++) { double x = unit.pnts[i].x - meanX; double y = unit.pnts[i].y - meanY; CV_MAT_ELEM(*A, double, 2*i, 0) = x; CV_MAT_ELEM(*A, double, 2*i, 1) = -y; CV_MAT_ELEM(*A, double, 2*i, 2) = 1; CV_MAT_ELEM(*A, double, 2*i+1, 0) = y; CV_MAT_ELEM(*A, double, 2*i+1, 1) = x; CV_MAT_ELEM(*A, double, 2*i+1, 3) = 1; } cvMulTransposed(A, P, 1); // P = (A^T * A) cvInvert(P, P, CV_SVD_SYM); // P = (A^T * A)^(-1) cvMatMul(A, P, Q); cvGEMM(Q, A, 1, NULL, 0, M, CV_GEMM_B_T); // M = M - I double* d = M->data.db; for (int i = 0; i < n; i++, d += n+1) { *d -= 1; } cvReleaseMat(&A); cvReleaseMat(&Q); cvReleaseMat(&P); }
//============================================================================ void AAM_IC::Draw(IplImage* image, const AAM_Shape& Shape, int type) { if(type == 0) AAM_Common::DrawPoints(image, Shape); else if(type == 1) AAM_Common::DrawTriangles(image, Shape, __paw.__tri); else if(type == 2) { cvGEMM(__error_t, __texture.GetBases(), 1, NULL, 1, __lamda, CV_GEMM_B_T); __texture.CalcTexture(__lamda, __warp_t); AAM_PAW paw; double minV, maxV; cvMinMaxLoc(__warp_t, &minV, &maxV); cvConvertScale(__warp_t, __warp_t, 255/(maxV-minV), -minV*255/(maxV-minV)); paw.Train(Shape, __Points, __Storage, __paw.GetTri(), false); AAM_Common::DrawAppearance(image, Shape, __warp_t, paw, __paw); } else fprintf(stderr, "ERROR(%s, %d): Unsupported drawing type\n", __FILE__, __LINE__); }
//============================================================================ void AAM_IC::CalcModifiedSD(CvMat* SD, const CvMat* dTx, const CvMat* dTy, const CvMat* Jx, const CvMat* Jy) { int i, j; //create steepest descent images double* _x = dTx->data.db; double* _y = dTy->data.db; double temp; for(i = 0; i < __shape.nModes()+4; i++) { for(j = 0; j < __paw.nPix(); j++) { temp = _x[3*j ]*cvmGet(Jx,j,i) +_y[3*j ]*cvmGet(Jy,j,i); cvmSet(SD,i,3*j,temp); temp = _x[3*j+1]*cvmGet(Jx,j,i) +_y[3*j+1]*cvmGet(Jy,j,i); cvmSet(SD,i,3*j+1,temp); temp = _x[3*j+2]*cvmGet(Jx,j,i) +_y[3*j+2]*cvmGet(Jy,j,i); cvmSet(SD,i,3*j+2,temp); } } //project out appearance variation (and linear lighting parameters) const CvMat* B = __texture.GetBases(); CvMat* V = cvCreateMat(4+__shape.nModes(), __texture.nModes(), CV_64FC1); CvMat SDMat, BMat; cvGEMM(SD, B, 1., NULL, 1., V, CV_GEMM_B_T); // Equation (63),(64) for(i = 0; i < __shape.nModes()+4; i++) { for(j = 0; j < __texture.nModes(); j++) { cvGetRow(SD, &SDMat, i); cvGetRow(B, &BMat, j); cvScaleAdd(&BMat, cvScalar(-cvmGet(V,i,j)), &SDMat, &SDMat); } } cvReleaseMat(&V); }
/*
  Calculates interpolated pixel contrast.  Based on Eqn. (3) in Lowe's paper.

  @param dog_pyr difference of Gaussians scale space pyramid
  @param octv octave of scale space
  @param intvl within-octave interval
  @param r pixel row
  @param c pixel column
  @param xi interpolated subpixel increment to interval
  @param xr interpolated subpixel increment to row
  @param xc interpolated subpixel increment to col

  @param Returns interpolated contrast.
*/
static double interp_contr( IplImage*** dog_pyr, int octv, int intvl, int r,
                            int c, double xi, double xr, double xc )
{
    CvMat* dD, X, T;
    double t[1], x[3] = { xc, xr, xi };

    // Column vector X of the offsets in the x, y and sigma directions.
    cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
    // T receives the 1x1 result of the matrix product.
    cvInitMatHeader( &T, 1, 1, CV_64FC1, t, CV_AUTOSTEP );
    // Partial derivatives in the x, y and scale directions at this point of
    // the DoG pyramid, returned in the column vector dD.
    dD = deriv_3D( dog_pyr, octv, intvl, r, c );
    // Matrix product: T = dD^T * X
    cvGEMM( dD, &X, 1, NULL, 0, &T, CV_GEMM_A_T );
    cvReleaseMat( &dD );

    // Return the interpolated contrast: D + 0.5 * dD^T * X
    // (see the SIFT paper for the derivation).
    return pixval32f( dog_pyr[octv][intvl], r, c ) + t[0] * 0.5;
}
// Maps a depth-image pixel to surface coordinates by applying the
// _img2surface homography to the homogeneous pixel position and
// de-homogenising the result.
CvPoint2D32f BackgroundModel::convertToSurfaceCoordinates(CvPoint pointInDepthImage) const
{
    // Lift the pixel into homogeneous coordinates.
    CvMat *homog = cvCreateMat(3, 1, CV_32FC1);
    CvMat *mapped = cvCreateMat(3, 1, CV_32FC1);
    CV_MAT_ELEM(*homog, float, 0, 0) = pointInDepthImage.x;
    CV_MAT_ELEM(*homog, float, 1, 0) = pointInDepthImage.y;
    CV_MAT_ELEM(*homog, float, 2, 0) = 1.0f;

    // apply the homography
    cvGEMM(_img2surface, homog, 1.0f, NULL, 0.0f, mapped, 0);

    // Perspective divide back to 2D.
    CvPoint2D32f p;
    p.x = CV_MAT_ELEM(*mapped, float, 0, 0) / CV_MAT_ELEM(*mapped, float, 2, 0);
    p.y = CV_MAT_ELEM(*mapped, float, 1, 0) / CV_MAT_ELEM(*mapped, float, 2, 0);

    cvReleaseMat(&homog);
    cvReleaseMat(&mapped);
    return p;
}
// Fit a hyperplane to a set of ND points. // Note: Input points must be in the form of an NxM matrix, where M is the dimensionality. // This function finds the best-fit plane P, in the least-squares // sense, between the points (X,Y,Z). The resulting plane P is described // by the coefficient vector W, where W(1)*X + W(2)*Y +W(3)*Z = W(3), for // (X,Y,Z) on the plane P. void cvFitPlane(const CvMat* points, float* plane){ // Estimate geometric centroid. int nrows = points->rows; int ncols = points->cols; int type = points->type; CvMat* centroid = cvCreateMat(1, ncols, type); cvSet(centroid, cvScalar(0)); for(int c=0; c<ncols; c++){ for(int r=0; r<nrows; r++) centroid->data.fl[c] += points->data.fl[ncols*r+c]; centroid->data.fl[c] /= nrows; } // Subtract geometric centroid from each point. CvMat* points2 = cvCreateMat(nrows, ncols, type); for(int r=0; r<nrows; r++) for(int c=0; c<ncols; c++) points2->data.fl[ncols*r+c] = points->data.fl[ncols*r+c] - centroid->data.fl[c]; // Evaluate SVD of covariance matrix. CvMat* A = cvCreateMat(ncols, ncols, type); CvMat* W = cvCreateMat(ncols, ncols, type); CvMat* V = cvCreateMat(ncols, ncols, type); cvGEMM(points2, points, 1, NULL, 0, A, CV_GEMM_A_T); cvSVD(A, W, NULL, V, CV_SVD_V_T); // Assign plane coefficients by singular vector corresponding to smallest singular value. plane[ncols] = 0; for(int c=0; c<ncols; c++){ plane[c] = V->data.fl[ncols*(ncols-1)+c]; plane[ncols] += plane[c]*centroid->data.fl[c]; } // Release allocated resources. cvReleaseMat(¢roid); cvReleaseMat(&points2); cvReleaseMat(&A); cvReleaseMat(&W); cvReleaseMat(&V); }
/* Performs one step of extremum interpolation: solves X = -H^(-1) * dD for
   the sub-pixel offset of the DoG extremum at (octv, intvl, r, c).  The
   offsets are returned through xi (interval), xr (row) and xc (column). */
void interp_step( IplImage*** dog_pyr, int octv, int intvl, int r, int c,
                  double* xi, double* xr, double* xc )
{
    double offset[3] = { 0 };
    CvMat X;

    CvMat* grad = deriv_3D( dog_pyr, octv, intvl, r, c );       /* 3x1 dD */
    CvMat* hess = hessian_3D( dog_pyr, octv, intvl, r, c );     /* 3x3 H */
    CvMat* hess_inv = cvCreateMat( 3, 3, CV_64FC1 );

    cvInvert( hess, hess_inv, CV_SVD );
    cvInitMatHeader( &X, 3, 1, CV_64FC1, offset, CV_AUTOSTEP );
    cvGEMM( hess_inv, grad, -1, NULL, 0, &X, 0 );   /* X = -H^-1 * dD */

    cvReleaseMat( &grad );
    cvReleaseMat( &hess );
    cvReleaseMat( &hess_inv );

    *xi = offset[2];
    *xr = offset[1];
    *xc = offset[0];
}
//! Performs one step of extremum interpolation. void FastHessian::interpolateStep(int r, int c, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b, double* xi, double* xr, double* xc ) { CvMat* dD, * H, * H_inv, X; double x[3] = { 0 }; dD = deriv3D( r, c, t, m, b ); H = hessian3D( r, c, t, m, b ); H_inv = cvCreateMat( 3, 3, CV_64FC1 ); cvInvert( H, H_inv, CV_SVD ); cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP ); cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 ); cvReleaseMat( &dD ); cvReleaseMat( &H ); cvReleaseMat( &H_inv ); *xi = x[2]; *xr = x[1]; *xc = x[0]; }
void CTWithWater::setCalibrationAndWaterDepth(const CalibrationData& calibrationData, double water_depth) { calibrationDataToOpenCVCalibration(calibrationData, m_CameraMatrix, m_DistCoeffs, m_Rv, m_T, m_R); /* CameraWorld = -R^T T */ cvGEMM(m_R, m_T, -1.0, NULL, 0, m_CameraWorld, CV_GEMM_A_T); cvSetIdentity(m_CameraMatrixNorm); /* TODO: These should really be read in from the calibration */ cvSetReal2D(m_CameraMatrixNorm, 0, 0, 472.0); cvSetReal2D(m_CameraMatrixNorm, 1, 1, 472.0); cvSetReal2D(m_CameraMatrixNorm, 0, 2, 320.0); cvSetReal2D(m_CameraMatrixNorm, 1, 2, 240.0); cvZero(m_DistCoeffsNorm); setWaterDepth(water_depth); }
/*
  Performs one step of extremum interpolation.  Based on Eqn. (3) in Lowe's
  paper.

  @param dog_pyr difference of Gaussians scale space pyramid
  @param octv octave of scale space
  @param intvl interval being interpolated
  @param r row being interpolated
  @param c column being interpolated
  @param xi output as interpolated subpixel increment to interval
  @param xr output as interpolated subpixel increment to row
  @param xc output as interpolated subpixel increment to col
*/
static void interp_step( IplImage*** dog_pyr, int octv, int intvl, int r, int c,
                         double* xi, double* xr, double* xc )
{
    CvMat* dD, * H, * H_inv, X;
    double x[3] = { 0 };

    // Partial derivatives in the x, y and scale directions at this point of
    // the DoG pyramid, returned in the column vector dD.
    dD = deriv_3D( dog_pyr, octv, intvl, r, c );
    // 3x3 Hessian of the DoG function at this point.
    H = hessian_3D( dog_pyr, octv, intvl, r, c );
    H_inv = cvCreateMat( 3, 3, CV_64FC1 );  // inverse of the Hessian
    cvInvert( H, H_inv, CV_SVD );
    cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
    // X = -H^(-1) * dD; the three elements are the offsets in the x, y and
    // sigma directions (see the SIFT paper for the derivation).
    cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 );

    cvReleaseMat( &dD );
    cvReleaseMat( &H );
    cvReleaseMat( &H_inv );

    *xi = x[2];   // offset in the scale (layer) direction
    *xr = x[1];   // offset in the y direction
    *xc = x[0];   // offset in the x direction
}
//!Estimate transform from a set of points
// Estimates the 2D similarity transform (rotation theta_, scale scale_,
// translation translation_) mapping points1 onto points2 using Umeyama's
// least-squares algorithm.  Both inputs are 2xN matrices of corresponding
// points (N = points1->cols).
// NOTE(review): no determinant/reflection correction is applied to the SVD
// result (Umeyama's S matrix), so a degenerate/reflective correspondence
// set will yield a wrong rotation -- confirm inputs are proper rotations.
// NOTE(review): theta_ is recovered via asin of one matrix entry, which is
// only unambiguous for |theta| <= pi/2.
void SimilarityTransform::estimateFromPoints(const CvMat * points1, const CvMat * points2)
{
    //const CvMat * temp;
    //CV_SWAP(points1, points2, temp);
    /* AffineTransform * pAT = getAffineTransform();
    pAT->estimateFromPoints(points1, points2);
    delete pAT;*/

    //Umeyama's algorithm:
    //Find mean and s.d.
    int numPoints = points1->cols;
    double meanP1Data[2];
    CvMat meanP1 = cvMat(2, 1, CV_64FC1, meanP1Data);
    double meanP2Data[2];
    CvMat meanP2 = cvMat(2, 1, CV_64FC1, meanP2Data);
    cvSetZero(&meanP1);
    cvSetZero(&meanP2);

    // accumulate centroids of both point sets
    for(int i = 0; i<numPoints; i++)
    {
        meanP1Data[0] += cvmGet(points1, 0, i);
        meanP1Data[1] += cvmGet(points1, 1, i);
        meanP2Data[0] += cvmGet(points2, 0, i);
        meanP2Data[1] += cvmGet(points2, 1, i);
    }
    double numPoints_inv = 1.0/numPoints;
    meanP1Data[0] *= numPoints_inv;
    meanP1Data[1] *= numPoints_inv;
    meanP2Data[0] *= numPoints_inv;
    meanP2Data[1] *= numPoints_inv;

    //Now calculate variance
    double varP1, varP2;
    varP1 = 0;
    varP2 = 0;
    // SIGMA accumulates the 2x2 cross-covariance of the centred point sets
    double SIGMAData[4];
    CvMat SIGMA = cvMat(2, 2, CV_64FC1, SIGMAData);
    cvSetZero(&SIGMA);
    for(int i = 0; i<numPoints; i++)
    {
        double x1 = cvmGet(points1, 0, i) - meanP1Data[0];
        double y1 = cvmGet(points1, 1, i) - meanP1Data[1];
        double x2 = cvmGet(points2, 0, i) - meanP2Data[0];
        double y2 = cvmGet(points2, 1, i) - meanP2Data[1];
        varP1 += sqr(x1) + sqr(y1);
        varP2 += sqr(x2) + sqr(y2);
        SIGMAData[0] += x1*x2;
        SIGMAData[1] += x1*y2;
        SIGMAData[2] += y1*x2;
        SIGMAData[3] += y1*y2;
    }
    varP1 *= numPoints_inv;
    varP2 *= numPoints_inv;
    SIGMAData[0] *= numPoints_inv;
    SIGMAData[1] *= numPoints_inv;
    SIGMAData[2] *= numPoints_inv;
    SIGMAData[3] *= numPoints_inv;

    double DData[4];
    CvMat D = cvMat(2, 2, CV_64FC1, DData);
    cvSetZero(&D);
    double UData[4];
    CvMat U = cvMat(2, 2, CV_64FC1, UData);
    cvSetZero(&U);
    double VData[4];
    CvMat V = cvMat(2, 2, CV_64FC1, VData);
    cvSetZero(&V);
    double RotationData[4];
    CvMat Rotation = cvMat(2, 2, CV_64FC1, RotationData);
    cvSetZero(&Rotation);

    // SIGMA = U D V^T; rotation = U V^T
    cvSVD(&SIGMA, &D, &U, &V);
    cvGEMM(&U, &V, 1, 0, 0, &Rotation, CV_GEMM_B_T);

    // theta_ = acos(RotationData[0]);
    theta_ = asin(RotationData[1]);
    // scale = trace(D) / var(points1)
    scale_ = (1.0/varP1) * cvTrace(&D).val[0];

    // translation = mean2 - scale * R * mean1
    double transData[2];
    CvMat trans = cvMat(2, 1, CV_64FC1, transData);
    cvSetZero(&trans);
    cvGEMM( &Rotation, &meanP1, -scale_, &meanP2, 1, &trans);
    translation_ = cvPoint2D64f(transData[0], transData[1]);
    //Applying this to p1 gives us p2
}
/*
   log_weight_div_det[k] = -2*log(weights_k) + log(det(Sigma_k)))

   covs[k] = cov_rotate_mats[k] * cov_eigen_values[k] * (cov_rotate_mats[k])'
   cov_rotate_mats[k] are orthogonal matrices of eigenvectors and
   cov_eigen_values[k] are diagonal matrices (represented by 1D vectors) of
   eigen values.

   The <alpha_ik> is the probability of the vector x_i to belong to the k-th cluster:
   <alpha_ik> ~ weights_k * exp{ -0.5[ln(det(Sigma_k)) + (x_i - mu_k)' Sigma_k^(-1) (x_i - mu_k)] }
   We calculate these probabilities here by the equivalent formulae:
   Denote
   S_ik = -0.5(log(det(Sigma_k)) + (x_i - mu_k)' Sigma_k^(-1) (x_i - mu_k)) + log(weights_k),
   M_i = max_k S_ik = S_qi, so that the q-th class is the one where maximum reaches. Then
   alpha_ik = exp{ S_ik - M_i } / ( 1 + sum_j!=q exp{ S_ji - M_i })
*/
/* Runs the EM iteration on the training data and returns the final
   log-likelihood; updates means, covs, weights and the cached per-cluster
   eigen decompositions as a side effect. */
double CvEM::run_em( const CvVectors& train_data )
{
    CvMat* centered_sample = 0;
    CvMat* covs_item = 0;
    CvMat* log_det = 0;
    CvMat* log_weights = 0;
    CvMat* cov_eigen_values = 0;
    CvMat* samples = 0;
    CvMat* sum_probs = 0;
    log_likelihood = -DBL_MAX;

    CV_FUNCNAME( "CvEM::run_em" );
    __BEGIN__;

    int nsamples = train_data.count, dims = train_data.dims, nclusters = params.nclusters;
    double min_variation = FLT_EPSILON;
    double min_det_value = MAX( DBL_MIN, pow( min_variation, dims ));
    double likelihood_bias = -CV_LOG2PI * (double)nsamples * (double)dims / 2., _log_likelihood = -DBL_MAX;
    int start_step = params.start_step;

    int i, j, k, n;
    int is_general = 0, is_diagonal = 0, is_spherical = 0;
    double prev_log_likelihood = -DBL_MAX / 1000., det, d;
    CvMat whdr, iwhdr, diag, *w, *iw;
    double* w_data;
    double* sp_data;

    /* Degenerate single-cluster case: no iteration is needed, just compute
       the (inverse) eigen values and the weight/determinant term. */
    if( nclusters == 1 )
    {
        double log_weight;
        CV_CALL( cvSet( probs, cvScalar(1.)) );

        if( params.cov_mat_type == COV_MAT_SPHERICAL )
        {
            d = cvTrace(*covs).val[0]/dims;
            d = MAX( d, FLT_EPSILON );
            inv_eigen_values->data.db[0] = 1./d;
            log_weight = pow( d, dims*0.5 );
        }
        else
        {
            w_data = inv_eigen_values->data.db;

            if( params.cov_mat_type == COV_MAT_GENERIC )
                cvSVD( *covs, inv_eigen_values, *cov_rotate_mats, 0, CV_SVD_U_T );
            else
                cvTranspose( cvGetDiag(*covs, &diag), inv_eigen_values );

            cvMaxS( inv_eigen_values, FLT_EPSILON, inv_eigen_values );
            for( j = 0, det = 1.; j < dims; j++ )
                det *= w_data[j];
            log_weight = sqrt(det);
            cvDiv( 0, inv_eigen_values, inv_eigen_values );  /* invert in place */
        }

        log_weight_div_det->data.db[0] = -2*log(weights->data.db[0]/log_weight);
        log_likelihood = DBL_MAX/1000.;
        EXIT;
    }

    if( params.cov_mat_type == COV_MAT_GENERIC )
        is_general = 1;
    else if( params.cov_mat_type == COV_MAT_DIAGONAL )
        is_diagonal = 1;
    else if( params.cov_mat_type == COV_MAT_SPHERICAL )
        is_spherical = 1;
    /* In the case of <cov_mat_type> == COV_MAT_DIAGONAL, the k-th row of cov_eigen_values
       contains the diagonal elements (variations). In the case of
       <cov_mat_type> == COV_MAT_SPHERICAL - the 0-ths elements of the vectors cov_eigen_values[k]
       are to be equal to the mean of the variations over all the dimensions. */

    CV_CALL( log_det = cvCreateMat( 1, nclusters, CV_64FC1 ));
    CV_CALL( log_weights = cvCreateMat( 1, nclusters, CV_64FC1 ));
    CV_CALL( covs_item = cvCreateMat( dims, dims, CV_64FC1 ));
    CV_CALL( centered_sample = cvCreateMat( 1, dims, CV_64FC1 ));
    CV_CALL( cov_eigen_values = cvCreateMat( inv_eigen_values->rows, inv_eigen_values->cols, CV_64FC1 ));
    CV_CALL( samples = cvCreateMat( nsamples, dims, CV_64FC1 ));
    CV_CALL( sum_probs = cvCreateMat( 1, nclusters, CV_64FC1 ));
    sp_data = sum_probs->data.db;

    // copy the training data into double-precision matrix
    for( i = 0; i < nsamples; i++ )
    {
        const float* src = train_data.data.fl[i];
        double* dst = (double*)(samples->data.ptr + samples->step*i);

        for( j = 0; j < dims; j++ )
            dst[j] = src[j];
    }

    /* Pre-compute the per-cluster (log-)determinants unless we start with
       the M-step (in which case covariances are produced by the M-step). */
    if( start_step != START_M_STEP )
    {
        for( k = 0; k < nclusters; k++ )
        {
            if( is_general || is_diagonal )
            {
                w = cvGetRow( cov_eigen_values, &whdr, k );
                if( is_general )
                    cvSVD( covs[k], w, cov_rotate_mats[k], 0, CV_SVD_U_T );
                else
                    cvTranspose( cvGetDiag( covs[k], &diag ), w );
                w_data = w->data.db;
                for( j = 0, det = 1.; j < dims; j++ )
                    det *= w_data[j];
                if( det < min_det_value )
                {
                    if( start_step == START_AUTO_STEP )
                        det = min_det_value;   /* clamp near-singular covariance */
                    else
                        EXIT;
                }
                log_det->data.db[k] = det;
            }
            else
            {
                d = cvTrace(covs[k]).val[0]/(double)dims;
                if( d < min_variation )
                {
                    if( start_step == START_AUTO_STEP )
                        d = min_variation;
                    else
                        EXIT;
                }
                cov_eigen_values->data.db[k] = d;
                log_det->data.db[k] = d;
            }
        }

        cvLog( log_det, log_det );
        if( is_spherical )
            cvScale( log_det, log_det, dims );
    }

    /* Main EM loop. */
    for( n = 0; n < params.term_crit.max_iter; n++ )
    {
        if( n > 0 || start_step != START_M_STEP )
        {
            // e-step: compute probs_ik from means_k, covs_k and weights_k.
            CV_CALL(cvLog( weights, log_weights ));

            // S_ik = -0.5[log(det(Sigma_k)) + (x_i - mu_k)' Sigma_k^(-1) (x_i - mu_k)] + log(weights_k)
            for( k = 0; k < nclusters; k++ )
            {
                CvMat* u = cov_rotate_mats[k];
                const double* mean = (double*)(means->data.ptr + means->step*k);
                w = cvGetRow( cov_eigen_values, &whdr, k );
                iw = cvGetRow( inv_eigen_values, &iwhdr, k );
                cvDiv( 0, w, iw );   /* inverse eigen values for this cluster */

                w_data = (double*)(inv_eigen_values->data.ptr + inv_eigen_values->step*k);

                for( i = 0; i < nsamples; i++ )
                {
                    double *csample = centered_sample->data.db, p = log_det->data.db[k];
                    const double* sample = (double*)(samples->data.ptr + samples->step*i);
                    double* pp = (double*)(probs->data.ptr + probs->step*i);
                    for( j = 0; j < dims; j++ )
                        csample[j] = sample[j] - mean[j];
                    if( is_general )
                        /* rotate into the eigenbasis of Sigma_k */
                        cvGEMM( centered_sample, u, 1, 0, 0, centered_sample, CV_GEMM_B_T );
                    for( j = 0; j < dims; j++ )
                        p += csample[j]*csample[j]*w_data[is_spherical ? 0 : j];
                    pp[k] = -0.5*p + log_weights->data.db[k];

                    // S_ik <- S_ik - max_j S_ij
                    if( k == nclusters - 1 )
                    {
                        double max_val = 0;
                        for( j = 0; j < nclusters; j++ )
                            max_val = MAX( max_val, pp[j] );
                        for( j = 0; j < nclusters; j++ )
                            pp[j] -= max_val;
                    }
                }
            }

            CV_CALL(cvExp( probs, probs )); // exp( S_ik )
            cvZero( sum_probs );

            // alpha_ik = exp( S_ik ) / sum_j exp( S_ij ),
            // log_likelihood = sum_i log (sum_j exp(S_ij))
            for( i = 0, _log_likelihood = likelihood_bias; i < nsamples; i++ )
            {
                double* pp = (double*)(probs->data.ptr + probs->step*i), sum = 0;
                for( j = 0; j < nclusters; j++ )
                    sum += pp[j];
                sum = 1./MAX( sum, DBL_EPSILON );
                for( j = 0; j < nclusters; j++ )
                {
                    double p = pp[j] *= sum;
                    sp_data[j] += p;
                }
                _log_likelihood -= log( sum );
            }

            // check termination criteria
            if( fabs( (_log_likelihood - prev_log_likelihood) / prev_log_likelihood ) < params.term_crit.epsilon )
                break;
            prev_log_likelihood = _log_likelihood;
        }

        // m-step: update means_k, covs_k and weights_k from probs_ik
        cvGEMM( probs, samples, 1, 0, 0, means, CV_GEMM_A_T );
        for( k = 0; k < nclusters; k++ )
        {
            double sum = sp_data[k], inv_sum = 1./sum;
            CvMat* cov = covs[k], _mean, _sample;

            w = cvGetRow( cov_eigen_values, &whdr, k );
            w_data = w->data.db;
            cvGetRow( means, &_mean, k );
            cvGetRow( samples, &_sample, k );

            // update weights_k
            weights->data.db[k] = sum;

            // update means_k
            cvScale( &_mean, &_mean, inv_sum );

            // compute covs_k
            cvZero( cov );
            cvZero( w );

            for( i = 0; i < nsamples; i++ )
            {
                double p = probs->data.db[i*nclusters + k]*inv_sum;
                _sample.data.db = (double*)(samples->data.ptr + samples->step*i);

                if( is_general )
                {
                    cvMulTransposed( &_sample, covs_item, 1, &_mean );
                    cvScaleAdd( covs_item, cvRealScalar(p), cov, cov );
                }
                else
                    for( j = 0; j < dims; j++ )
                    {
                        double val = _sample.data.db[j] - _mean.data.db[j];
                        w_data[is_spherical ? 0 : j] += p*val*val;
                    }
            }

            if( is_spherical )
            {
                d = w_data[0]/(double)dims;
                d = MAX( d, min_variation );
                w->data.db[0] = d;
                log_det->data.db[k] = d;
            }
            else
            {
                if( is_general )
                    cvSVD( cov, w, cov_rotate_mats[k], 0, CV_SVD_U_T );
                cvMaxS( w, min_variation, w );
                for( j = 0, det = 1.; j < dims; j++ )
                    det *= w_data[j];
                log_det->data.db[k] = det;
            }
        }

        cvConvertScale( weights, weights, 1./(double)nsamples, 0 );
        cvMaxS( weights, DBL_MIN, weights );

        cvLog( log_det, log_det );
        if( is_spherical )
            cvScale( log_det, log_det, dims );
    } // end of iteration process

    //log_weight_div_det[k] = -2*log(weights_k/det(Sigma_k))^0.5) = -2*log(weights_k) + log(det(Sigma_k)))
    if( log_weight_div_det )
    {
        cvScale( log_weights, log_weight_div_det, -2 );
        cvAdd( log_weight_div_det, log_det, log_weight_div_det );
    }

    /* Now finalize all the covariation matrices:
    1) if <cov_mat_type> == COV_MAT_DIAGONAL we used array of <w> as diagonals.
       Now w[k] should be copied back to the diagonals of covs[k];
    2) if <cov_mat_type> == COV_MAT_SPHERICAL we used the 0-th element of w[k]
       as an average variation in each cluster. The value of the 0-th element of w[k]
       should be copied to the all of the diagonal elements of covs[k]. */
    if( is_spherical )
    {
        for( k = 0; k < nclusters; k++ )
            cvSetIdentity( covs[k], cvScalar(cov_eigen_values->data.db[k]));
    }
    else if( is_diagonal )
    {
        for( k = 0; k < nclusters; k++ )
            cvTranspose( cvGetRow( cov_eigen_values, &whdr, k ),
                         cvGetDiag( covs[k], &diag ));
    }
    cvDiv( 0, cov_eigen_values, inv_eigen_values );

    log_likelihood = _log_likelihood;

    __END__;

    cvReleaseMat( &log_det );
    cvReleaseMat( &log_weights );
    cvReleaseMat( &covs_item );
    cvReleaseMat( &centered_sample );
    cvReleaseMat( &cov_eigen_values );
    cvReleaseMat( &samples );
    cvReleaseMat( &sum_probs );

    return log_likelihood;
}
// Initializes the EM model (means, weights, covariances) before the iteration
// loop, according to params.start_step:
//   - START_AUTO_STEP (or degenerate cluster counts): full automatic init.
//   - START_M_STEP: only sanitizes the user-supplied probabilities
//     (clamps negatives, renormalizes rows to sum 1) and returns early.
//   - START_E_STEP: copies user-supplied means/weights/covs when present,
//     otherwise falls back to automatic init.
// Afterwards every covariance matrix is regularized through an SVD:
// spherical -> scaled identity, diagonal -> singular values only,
// generic -> reconstructed from its non-negative spectrum.
void CvEM::init_em( const CvVectors& train_data )
{
    CvMat *w = 0, *u = 0, *tcov = 0;

    CV_FUNCNAME( "CvEM::init_em" );

    __BEGIN__;

    double maxval = 0;
    int i, force_symm_plus = 0;
    int nclusters = params.nclusters, nsamples = train_data.count, dims = train_data.dims;

    if( params.start_step == START_AUTO_STEP || nclusters == 1 || nclusters == nsamples )
        init_auto( train_data );
    else if( params.start_step == START_M_STEP )
    {
        // Sanitize the caller-provided responsibilities row by row.
        for( i = 0; i < nsamples; i++ )
        {
            CvMat prob;
            cvGetRow( params.probs, &prob, i );
            cvMaxS( &prob, 0., &prob );          // clamp negative probabilities to 0
            cvMinMaxLoc( &prob, 0, &maxval );
            if( maxval < FLT_EPSILON )
                cvSet( &prob, cvScalar(1./nclusters) );  // all-zero row -> uniform
            else
                cvNormalize( &prob, &prob, 1., 0, CV_L1 );  // make the row sum to 1
        }
        // do not preprocess covariation matrices,
        // as in this case they are initialized at the first iteration of EM
        EXIT;
    }
    else
    {
        CV_ASSERT( params.start_step == START_E_STEP && params.means );
        if( params.weights && params.covs )
        {
            cvConvert( params.means, means );
            // Temporarily reshape so the user's weights layout (row or column)
            // converts cleanly, then flatten back to a single row.
            cvReshape( weights, weights, 1, params.weights->rows );
            cvConvert( params.weights, weights );
            cvReshape( weights, weights, 1, 1 );
            cvMaxS( weights, 0., weights );
            cvMinMaxLoc( weights, 0, &maxval );
            if( maxval < FLT_EPSILON )
                cvSet( weights, cvScalar(1./nclusters) );
            cvNormalize( weights, weights, 1., 0, CV_L1 );

            for( i = 0; i < nclusters; i++ )
                CV_CALL( cvConvert( params.covs[i], covs[i] ));
            // User matrices may be slightly asymmetric; symmetrize them below.
            force_symm_plus = 1;
        }
        else
            init_auto( train_data );
    }

    CV_CALL( tcov = cvCreateMat( dims, dims, CV_64FC1 ));
    CV_CALL( w = cvCreateMat( dims, dims, CV_64FC1 ));

    if( params.cov_mat_type == COV_MAT_GENERIC )
        CV_CALL( u = cvCreateMat( dims, dims, CV_64FC1 ));

    for( i = 0; i < nclusters; i++ )
    {
        if( force_symm_plus )
        {
            // tcov = (covs[i] + covs[i]') / 2  -- enforce symmetry
            cvTranspose( covs[i], tcov );
            cvAddWeighted( covs[i], 0.5, tcov, 0.5, 0, tcov );
        }
        else
            cvCopy( covs[i], tcov );

        // SVD of the symmetric matrix; u is only requested for the generic case.
        cvSVD( tcov, w, u, 0, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );

        if( params.cov_mat_type == COV_MAT_SPHERICAL )
            // replace by (trace/dims) * I, the best spherical approximation
            cvSetIdentity( covs[i], cvScalar(cvTrace(w).val[0]/dims) );
        else if( params.cov_mat_type == COV_MAT_DIAGONAL )
            cvCopy( w, covs[i] );  // keep only the singular values on the diagonal
        else
        {
            // generic case: covs[i] = (u')'*max(w,0)*u'
            cvGEMM( u, w, 1, 0, 0, tcov, CV_GEMM_A_T );
            cvGEMM( tcov, u, 1, 0, 0, covs[i], 0 );
        }
    }

    __END__;

    cvReleaseMat( &w );
    cvReleaseMat( &u );
    cvReleaseMat( &tcov );
}
// Classifies one sample against the trained mixture: returns the index of the
// component with the smallest (scaled) negative log-likelihood, and optionally
// fills _probs with the normalized per-component probabilities.
// Uses the cached decomposition cov = u * w * u', so
// cov^(-1) = u * w^(-1) * u' and the Mahalanobis term is computed from the
// rotated difference vector and the inverse eigenvalues.
float CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
{
    float* sample_data   = 0;
    void* buffer = 0;
    int allocated_buffer = 0;
    int cls = 0;

    CV_FUNCNAME( "CvEM::predict" );
    __BEGIN__;

    int i, k, dims;
    int nclusters;
    int cov_mat_type = params.cov_mat_type;
    double opt = FLT_MAX;  // running minimum of the per-cluster scores
    size_t size;
    CvMat diff, expo;

    dims = means->cols;
    nclusters = params.nclusters;

    // validates _sample/_probs and converts the sample to a float buffer
    CV_CALL( cvPreparePredictData( _sample, dims, 0, params.nclusters, _probs, &sample_data ));

    // allocate memory and initializing headers for calculating;
    // one scratch area holds both the score vector and the diff vector
    size = sizeof(double) * (nclusters + dims);
    if( size <= CV_MAX_LOCAL_SIZE )
        buffer = cvStackAlloc( size );  // small enough for the stack
    else
    {
        CV_CALL( buffer = cvAlloc( size ));
        allocated_buffer = 1;  // remember to cvFree below
    }
    expo = cvMat( 1, nclusters, CV_64FC1, buffer );
    diff = cvMat( 1, dims, CV_64FC1, (double*)buffer + nclusters );

    // calculate the probabilities
    for( k = 0; k < nclusters; k++ )
    {
        const double* mean_k = (const double*)(means->data.ptr + means->step*k);
        const double* w = (const double*)(inv_eigen_values->data.ptr + inv_eigen_values->step*k);
        // cur starts at -2*log(weight_k / sqrt(det(cov_k)))
        double cur = log_weight_div_det->data.db[k];
        CvMat* u = cov_rotate_mats[k];
        // cov = u w u'  -->  cov^(-1) = u w^(-1) u'
        if( cov_mat_type == COV_MAT_SPHERICAL )
        {
            // single shared inverse variance w[0]
            double w0 = w[0];
            for( i = 0; i < dims; i++ )
            {
                double val = sample_data[i] - mean_k[i];
                cur += val*val*w0;
            }
        }
        else
        {
            for( i = 0; i < dims; i++ )
                diff.data.db[i] = sample_data[i] - mean_k[i];
            if( cov_mat_type == COV_MAT_GENERIC )
                // rotate diff into the eigenbasis of cov_k (in-place)
                cvGEMM( &diff, u, 1, 0, 0, &diff, CV_GEMM_B_T );
            for( i = 0; i < dims; i++ )
            {
                double val = diff.data.db[i];
                cur += val*val*w[i];
            }
        }

        expo.data.db[k] = cur;
        if( cur < opt )
        {
            cls = k;  // smallest score = most probable component
            opt = cur;
        }
        /* probability = (2*pi)^(-dims/2)*exp( -0.5 * cur ) */
    }

    if( _probs )
    {
        // softmax-style normalization of the scores into probabilities
        CV_CALL( cvConvertScale( &expo, &expo, -0.5 ));
        CV_CALL( cvExp( &expo, &expo ));
        if( _probs->cols == 1 )
            CV_CALL( cvReshape( &expo, &expo, 0, nclusters ));  // match column output
        CV_CALL( cvConvertScale( &expo, _probs, 1./cvSum( &expo ).val[0] ));
    }

    __END__;

    // cvPreparePredictData only allocates when the sample needed conversion
    if( sample_data != _sample->data.fl )
        cvFree( &sample_data );
    if( allocated_buffer )
        cvFree( &buffer );

    return (float)cls;
}
// Decomposes a 3x3 matrix M into R*Q where R is upper triangular and Q is an
// orthogonal (rotation) matrix, using three Givens rotations that successively
// zero out the below-diagonal entries. Optionally outputs the three individual
// rotations Qx, Qy, Qz and the corresponding Euler angles in degrees.
// The sign ambiguity of the decomposition is resolved so that the diagonal
// entries of R, except possibly the last one, are non-negative.
CV_IMPL void cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
                            CvMat *matrixQx, CvMat *matrixQy, CvMat *matrixQz,
                            CvPoint3D64f *eulerAngles)
{
    CV_FUNCNAME("cvRQDecomp3x3");
    __BEGIN__;

    double _M[3][3], _R[3][3], _Q[3][3];
    CvMat M = cvMat(3, 3, CV_64F, _M);
    CvMat R = cvMat(3, 3, CV_64F, _R);
    CvMat Q = cvMat(3, 3, CV_64F, _Q);
    double z, c, s;

    /* Validate parameters. */
    CV_ASSERT( CV_IS_MAT(matrixM) && CV_IS_MAT(matrixR) && CV_IS_MAT(matrixQ) &&
        matrixM->cols == 3 && matrixM->rows == 3 &&
        CV_ARE_SIZES_EQ(matrixM, matrixR) && CV_ARE_SIZES_EQ(matrixM, matrixQ));

    cvConvert(matrixM, &M);

    {
    /* Find Givens rotation Q_x for x axis (left multiplication). */
    /*
         ( 1  0  0 )
    Qx = ( 0  c  s ), c = m33/sqrt(m32^2 + m33^2), s = m32/sqrt(m32^2 + m33^2)
         ( 0 -s  c )
    */
    // DBL_EPSILON guards against division by zero when both entries vanish
    s = _M[2][1];
    c = _M[2][2];
    z = 1./sqrt(c * c + s * s + DBL_EPSILON);
    c *= z;
    s *= z;

    double _Qx[3][3] = { {1, 0, 0}, {0, c, s}, {0, -s, c} };
    CvMat Qx = cvMat(3, 3, CV_64F, _Qx);

    cvMatMul(&M, &Qx, &R);
    assert(fabs(_R[2][1]) < FLT_EPSILON);
    _R[2][1] = 0;  // force exact zero after the rotation

    /* Find Givens rotation for y axis. */
    /*
         ( c  0  s )
    Qy = ( 0  1  0 ), c = m33/sqrt(m31^2 + m33^2), s = m31/sqrt(m31^2 + m33^2)
         (-s  0  c )
    */
    s = _R[2][0];
    c = _R[2][2];
    z = 1./sqrt(c * c + s * s + DBL_EPSILON);
    c *= z;
    s *= z;

    double _Qy[3][3] = { {c, 0, s}, {0, 1, 0}, {-s, 0, c} };
    CvMat Qy = cvMat(3, 3, CV_64F, _Qy);

    cvMatMul(&R, &Qy, &M);
    assert(fabs(_M[2][0]) < FLT_EPSILON);
    _M[2][0] = 0;  // force exact zero after the rotation

    /* Find Givens rotation for z axis. */
    /*
         ( c  s  0 )
    Qz = (-s  c  0 ), c = m22/sqrt(m21^2 + m22^2), s = m21/sqrt(m21^2 + m22^2)
         ( 0  0  1 )
    */
    s = _M[1][0];
    c = _M[1][1];
    z = 1./sqrt(c * c + s * s + DBL_EPSILON);
    c *= z;
    s *= z;

    double _Qz[3][3] = { {c, s, 0}, {-s, c, 0}, {0, 0, 1} };
    CvMat Qz = cvMat(3, 3, CV_64F, _Qz);

    cvMatMul(&M, &Qz, &R);
    assert(fabs(_R[1][0]) < FLT_EPSILON);
    _R[1][0] = 0;  // force exact zero after the rotation

    // Solve the decomposition ambiguity.
    // Diagonal entries of R, except the last one, shall be positive.
    // Further rotate R by 180 degree if necessary
    if( _R[0][0] < 0 )
    {
        if( _R[1][1] < 0 )
        {
            // rotate around z for 180 degree, i.e. a rotation matrix of
            // [-1,  0,  0],
            // [ 0, -1,  0],
            // [ 0,  0,  1]
            _R[0][0] *= -1;
            _R[0][1] *= -1;
            _R[1][1] *= -1;

            _Qz[0][0] *= -1;
            _Qz[0][1] *= -1;
            _Qz[1][0] *= -1;
            _Qz[1][1] *= -1;
        }
        else
        {
            // rotate around y for 180 degree, i.e. a rotation matrix of
            // [-1,  0,  0],
            // [ 0,  1,  0],
            // [ 0,  0, -1]
            _R[0][0] *= -1;
            _R[0][2] *= -1;
            _R[1][2] *= -1;
            _R[2][2] *= -1;

            cvTranspose( &Qz, &Qz );

            _Qy[0][0] *= -1;
            _Qy[0][2] *= -1;
            _Qy[2][0] *= -1;
            _Qy[2][2] *= -1;
        }
    }
    else if( _R[1][1] < 0 )
    {
        // ??? for some reason, we never get here ???

        // rotate around x for 180 degree, i.e. a rotation matrix of
        // [ 1,  0,  0],
        // [ 0, -1,  0],
        // [ 0,  0, -1]
        _R[0][1] *= -1;
        _R[0][2] *= -1;
        _R[1][1] *= -1;
        _R[1][2] *= -1;
        _R[2][2] *= -1;

        cvTranspose( &Qz, &Qz );
        cvTranspose( &Qy, &Qy );

        _Qx[1][1] *= -1;
        _Qx[1][2] *= -1;
        _Qx[2][1] *= -1;
        _Qx[2][2] *= -1;
    }

    // calculate the euler angle (degrees; sign taken from the off-diagonal term)
    if( eulerAngles )
    {
        eulerAngles->x = acos(_Qx[1][1]) * (_Qx[1][2] >= 0 ? 1 : -1) * (180.0 / CV_PI);
        eulerAngles->y = acos(_Qy[0][0]) * (_Qy[0][2] >= 0 ? 1 : -1) * (180.0 / CV_PI);
        eulerAngles->z = acos(_Qz[0][0]) * (_Qz[0][1] >= 0 ? 1 : -1) * (180.0 / CV_PI);
    }

    /* Calculate orthogonal matrix. */
    /*
    Q = QzT * QyT * QxT
    */
    cvGEMM( &Qz, &Qy, 1, 0, 0, &M, CV_GEMM_A_T + CV_GEMM_B_T );
    cvGEMM( &M, &Qx, 1, 0, 0, &Q, CV_GEMM_B_T );

    /* Save R and Q matrices. */
    cvConvert( &R, matrixR );
    cvConvert( &Q, matrixQ );

    if( matrixQx )
        cvConvert(&Qx, matrixQx);
    if( matrixQy )
        cvConvert(&Qy, matrixQy);
    if( matrixQz )
        cvConvert(&Qz, matrixQz);
    }

    __END__;
}
void icvGaussNewton( const CvMat* J, const CvMat* err, CvMat* delta, CvMat* JtJ, CvMat* JtErr, CvMat* JtJW, CvMat* JtJV ) { CvMat* _temp_JtJ = 0; CvMat* _temp_JtErr = 0; CvMat* _temp_JtJW = 0; CvMat* _temp_JtJV = 0; CV_FUNCNAME( "icvGaussNewton" ); __BEGIN__; if( !CV_IS_MAT(J) || !CV_IS_MAT(err) || !CV_IS_MAT(delta) ) CV_ERROR( CV_StsBadArg, "Some of required arguments is not a valid matrix" ); if( !JtJ ) { CV_CALL( _temp_JtJ = cvCreateMat( J->cols, J->cols, J->type )); JtJ = _temp_JtJ; } else if( !CV_IS_MAT(JtJ) ) CV_ERROR( CV_StsBadArg, "JtJ is not a valid matrix" ); if( !JtErr ) { CV_CALL( _temp_JtErr = cvCreateMat( J->cols, 1, J->type )); JtErr = _temp_JtErr; } else if( !CV_IS_MAT(JtErr) ) CV_ERROR( CV_StsBadArg, "JtErr is not a valid matrix" ); if( !JtJW ) { CV_CALL( _temp_JtJW = cvCreateMat( J->cols, 1, J->type )); JtJW = _temp_JtJW; } else if( !CV_IS_MAT(JtJW) ) CV_ERROR( CV_StsBadArg, "JtJW is not a valid matrix" ); if( !JtJV ) { CV_CALL( _temp_JtJV = cvCreateMat( J->cols, J->cols, J->type )); JtJV = _temp_JtJV; } else if( !CV_IS_MAT(JtJV) ) CV_ERROR( CV_StsBadArg, "JtJV is not a valid matrix" ); cvMulTransposed( J, JtJ, 1 ); cvGEMM( J, err, 1, 0, 0, JtErr, CV_GEMM_A_T ); cvSVD( JtJ, JtJW, 0, JtJV, CV_SVD_MODIFY_A + CV_SVD_V_T ); cvSVBkSb( JtJW, JtJV, JtJV, JtErr, delta, CV_SVD_U_T + CV_SVD_V_T ); __END__; if( _temp_JtJ || _temp_JtErr || _temp_JtJW || _temp_JtJV ) { cvReleaseMat( &_temp_JtJ ); cvReleaseMat( &_temp_JtErr ); cvReleaseMat( &_temp_JtJW ); cvReleaseMat( &_temp_JtJV ); } }
int main(int argc, char **argv) { IplImage *inputImg; trainImage sample; cvNamedWindow("With COM", CV_WINDOW_AUTOSIZE); // CvCapture* capture = 0; // capture = cvCreateCameraCapture(-1); // if(!capture){ // return -1; // } // inputImg = cvQueryFrame(capture); #include <opencv2/ml/ml.hpp> float result; initializeFaceProcessor(); CvMat* SampleMatrix; CvMat* PjMatrix=(CvMat*)cvLoad("/home/umut/projects/fastTrainer/build/ProjectionMatrix.xml"); int newDimension=PjMatrix->cols; // int newDimension; CvMat* allFeatures; CvMat* LDAMatrix=cvCreateMat(newDimension,1,CV_32F); // CvBoost booster; // // booster.load("/home/umut/projects/fastTrainer/build/Booster.dat"); int trans=CV_GEMM_A_T; CvSVM SVM; SVM.load("/home/umut/projects/fastTrainer/build/SVM_CLASS.dat"); // Grab the next frame from the camera. // while((inputImg = cvQueryFrame(capture)) != NULL ){ for (int i=1;i<argc;i++){ inputImg=cvLoadImage(argv[i]); if(processFace(inputImg, sample.FaceImage, sample.MouthImage, sample.NoseImage, sample.EyeImage, 0)) { sample.LBPHF=LBP_HF(sample.FaceImage,sample.nonUniform,sample.complete); //Pass through the LBPHF // sample.EyeImage=filterGabor(sample.EyeImage); // sample.NoseImage=filterGabor(sample.NoseImage); // sample.MouthImage=filterGabor(sample.MouthImage); mat2Col(&sample,0,1,0,SampleMatrix); // newDimension=SampleMatrix->rows; allFeatures=cvCreateMat(1,35+2+newDimension,CV_32F); cvGEMM(PjMatrix,SampleMatrix,1,NULL,0,LDAMatrix,trans); cvSetReal1D(allFeatures,0,sample.complete); cvSetReal1D(allFeatures,1,sample.nonUniform); for (int j=0;j<35;j++) cvSetReal1D(allFeatures,2+j,sample.LBPHF[j]); for (int j=0;j <newDimension;j++) // cvSetReal1D(allFeatures,37+j,cvGetReal1D(SampleMatrix,j)); cvSetReal1D(allFeatures,37+j,cvGetReal1D(LDAMatrix,j)); // cout<< "feature Size: "<< allFeatures->cols << "\n"; // result=booster.predict(allFeatures,0,booster.get_weak_response()); result=SVM.predict(allFeatures); if (result==0) { 
cvRectangle(sample.FaceImage,cvPoint(2,2),cvPoint(sample.FaceImage->width-2,sample.FaceImage->height-2),cvScalar(255,0,0),3); printf("Result is male\n"); } else { cvRectangle(sample.FaceImage,cvPoint(2,2),cvPoint(sample.FaceImage->width-2,sample.FaceImage->height-2),cvScalar(0,0,255),3); printf("Result is female\n"); } cvShowImage("With COM",sample.FaceImage); char c=cvWaitKey(0); // char c=cvWaitKey(5); // if (c==27) break; } } if (strcmp(argv[1],"1")) cvReleaseImage( &inputImg); }
//============================================================================ void AAM_IC::Fit(const IplImage* image, AAM_Shape& Shape, int max_iter /* = 30 */, bool showprocess /* = false */) { //initialize some stuff double t = gettime; const CvMat* A0 = __texture.GetMean(); CvMat p; cvGetCols(__search_pq, &p, 4, 4+__shape.nModes()); Shape.Point2Mat(__current_s); SetAllParamsZero(); __shape.CalcParams(__current_s, __search_pq); IplImage* Drawimg = 0; for(int iter = 0; iter < max_iter; iter++) { if(showprocess) { if(Drawimg == 0) Drawimg = cvCloneImage(image); else cvCopy(image, Drawimg); Shape.Mat2Point(__current_s); Draw(Drawimg, Shape, 2); mkdir("result"); char filename[100]; sprintf(filename, "result/Iter-%02d.jpg", iter); cvSaveImage(filename, Drawimg); } //check the current shape AAM_Common::CheckShape(__current_s, image->width, image->height); //warp image to mesh shape mesh __paw.CalcWarpTexture(__current_s, image, __warp_t); AAM_TDM::NormalizeTexture(A0, __warp_t); cvSub(__warp_t, A0, __error_t); //calculate updates (and scale to account for linear lighting gain) cvGEMM(__error_t, __G, 1, NULL, 1, __delta_pq, CV_GEMM_B_T); //check for parameter convergence if(cvNorm(__delta_pq) < 1e-6) break; //apply inverse compositional algorithm to update parameters InverseCompose(__delta_pq, __current_s, __update_s); //smooth shape cvAddWeighted(__current_s, 0.4, __update_s, 0.6, 0, __update_s); //update parameters __shape.CalcParams(__update_s, __search_pq); //calculate constrained new shape __shape.CalcShape(__search_pq, __update_s); //check for shape convergence if(cvNorm(__current_s, __update_s, CV_L2) < 0.001) break; else cvCopy(__update_s, __current_s); } Shape.Mat2Point(__current_s); t = gettime-t; printf("AAM IC Fitting time cost %.3f millisec\n", t); cvReleaseImage(&Drawimg); }