/*
 * Replaces the value stored in every leaf of <tree> with the optimal constant
 * response (find_optimal_value) computed over the subsample points that fall
 * into that leaf, then accumulates the shrunken leaf value into the running
 * per-sample prediction sums for output dimension <_k>.
 *
 * tree - the newly grown decision tree of the current boosting iteration.
 * _k   - index of the output dimension (class) being updated.
 *
 * Improvements over the original: the loop-invariant get_len(subsample_train)
 * is computed once, and the pointless loops that nulled every array element
 * right before delete[] were removed.
 */
void CvGBTrees::change_values(CvDTree* tree, const int _k)
{
    const int n = get_len(subsample_train);  // number of subsample entries

    // For every subsample point remember which leaf of <tree> it reaches.
    CvDTreeNode** predictions = new pCvDTreeNode[n];

    int* sample_data = sample_idx->data.i;
    int* subsample_data = subsample_train->data.i;
    // sample_idx may be stored as a row or a column vector; pick the
    // element stride accordingly.
    int s_step = (sample_idx->cols > sample_idx->rows) ? 1
                 : sample_idx->step/CV_ELEM_SIZE(sample_idx->type);

    CvMat x;
    CvMat miss_x;

    for (int i=0; i<n; ++i)
    {
        int idx = *(sample_data + subsample_data[i]*s_step);

        if (data->tflag == CV_ROW_SAMPLE)
            cvGetRow( data->train_data, &x, idx);
        else
            cvGetCol( data->train_data, &x, idx);

        if (missing)
        {
            if (data->tflag == CV_ROW_SAMPLE)
                cvGetRow( missing, &miss_x, idx);
            else
                cvGetCol( missing, &miss_x, idx);

            predictions[i] = tree->predict(&x, &miss_x);
        }
        else
            predictions[i] = tree->predict(&x);
    }

    CvDTreeNode** leaves;
    int leaves_count = 0;
    leaves = GetLeaves( tree, leaves_count);

    for (int i=0; i<leaves_count; ++i)
    {
        // Count the subsample points that landed in this leaf.
        int samples_in_leaf = 0;
        for (int j=0; j<n; ++j)
        {
            if (leaves[i] == predictions[j])
                samples_in_leaf++;
        }

        if (!samples_in_leaf) // It should not be done anyways! but...
        {
            leaves[i]->value = 0.0;
            continue;
        }

        // Gather the original training-set indices of those points.
        CvMat* leaf_idx = cvCreateMat(1, samples_in_leaf, CV_32S);
        int* leaf_idx_data = leaf_idx->data.i;

        for (int j=0; j<n; ++j)
        {
            int idx = *(sample_data + subsample_data[j]*s_step);
            if (leaves[i] == predictions[j])
                *leaf_idx_data++ = idx;
        }

        // Optimal constant response for this leaf w.r.t. the loss function.
        float value = find_optimal_value(leaf_idx);
        leaves[i]->value = value;

        leaf_idx_data = leaf_idx->data.i;

        // Fold the shrunken leaf value into the running prediction sums
        // for every sample assigned to this leaf.
        int len = sum_response_tmp->cols;
        for (int j=0; j<get_len(leaf_idx); ++j)
        {
            int idx = leaf_idx_data[j];
            sum_response_tmp->data.fl[idx + _k*len] =
                sum_response->data.fl[idx + _k*len] +
                params.shrinkage * value;
        }

        cvReleaseMat(&leaf_idx);
    }

    // releasing the memory
    delete[] predictions;
    delete[] leaves;
}
/*
 * Shared worker behind the image-loading entry points.
 *
 * filename - path of the image file to decode.
 * flags    - CV_LOAD_IMAGE_* flags, or -1 to keep the file's native
 *            depth/channel layout.
 * hdrtype  - which kind of header to allocate for the result:
 *            LOAD_CVMAT -> allocates and returns a CvMat*,
 *            LOAD_IMAGE -> allocates and returns an IplImage*,
 *            LOAD_MAT   -> fills *mat and returns mat.
 * mat      - destination Mat; used only when hdrtype == LOAD_MAT.
 *
 * Returns 0 on any failure (no decoder found, bad header, decode error).
 */
static void* imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 )
{
    IplImage* image = 0;
    CvMat *matrix = 0;
    Mat temp, *data = &temp;

    // Pick a decoder for this file; bail out if the format is unsupported
    // or the header cannot be parsed.
    ImageDecoder decoder = findDecoder(filename);
    if( !decoder )
        return 0;
    decoder->setSource(filename);
    if( !decoder->readHeader() )
        return 0;

    CvSize size;
    size.width = decoder->width();
    size.height = decoder->height();

    int type = decoder->type();
    if( flags != -1 )
    {
        // Unless ANYDEPTH was requested, force 8-bit output.
        if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 )
            type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));

        // Force 3-channel (color) or 1-channel (gray) output depending on
        // the COLOR/ANYCOLOR flags and the file's own channel count.
        if( (flags & CV_LOAD_IMAGE_COLOR) != 0 ||
           ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) )
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);
        else
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);
    }

    // Allocate storage of the requested header kind; <data> always ends up
    // pointing at a Mat view of that storage for the decoder to fill.
    if( hdrtype == LOAD_CVMAT || hdrtype == LOAD_MAT )
    {
        if( hdrtype == LOAD_CVMAT )
        {
            matrix = cvCreateMat( size.height, size.width, type );
            temp = cvarrToMat(matrix);
        }
        else
        {
            mat->create( size.height, size.width, type );
            data = mat;
        }
    }
    else
    {
        image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );
        temp = cvarrToMat(image);
    }

    // Decode the pixel data; release whatever was allocated on failure.
    if( !decoder->readData( *data ))
    {
        cvReleaseImage( &image );
        cvReleaseMat( &matrix );
        if( mat )
            mat->release();
        return 0;
    }

    return hdrtype == LOAD_CVMAT ? (void*)matrix :
        hdrtype == LOAD_IMAGE ? (void*)image : (void*)mat;
}
/*
 * Rasterizes L radial lines through an N x N frequency-plane grid
 * (a direct port of MATLAB line-mask code; the original MATLAB
 * statements are quoted inline).
 *
 * L   - number of line angles, spread over [0, pi - pi/L].
 * N   - mask side length.
 * mhi - (intended) output: nonzero entries of the half-plane mask Mh.
 * M   - (intended) output: full N x N line mask.
 * Mh  - (intended) output: upper-half-plane copy of M.
 * mi  - (intended) output: nonzero entries of M.
 *
 * NOTE(review): all four "output" matrices are CvMat* parameters passed BY
 * VALUE and are re-bound inside via cvCreateMat(), so the caller never sees
 * any result and every one of those allocations leaks.  The signature most
 * likely needs CvMat** (or caller-allocated outputs) -- confirm intended use.
 * NOTE(review): <mi>/<mhi> store the nonzero *values* (always 1.0), not the
 * indices a MATLAB find() would return, and are allocated one row larger
 * than <count> -- confirm against the original MATLAB.
 */
void LineMask_limitedandleTange(int L, int N, CvMat* mhi, CvMat* M, CvMat* Mh, CvMat* mi)
{
    /* matlab : thc = linspace(0, pi-pi/L, L); */
    CvMat *thc = cvCreateMat(1,L,CV_32F);
    int i,j;
    cvmSet(thc,0,0,0.);
    for(i = 1; i< L; i++){
        float elements = i*(pi-pi/L)/(L-1);
        cvmSet(thc,0,i,elements);
    }
    /*matlab : M = zeros(N);*/
    M = cvCreateMat(N,N,CV_32F);   /* NOTE(review): re-binds the local pointer only */
    cvmSetZero(M);
    /* matlab : (-N/2+1 : N/2-1) */
    int ll = 0;
    CvMat *blank_aa = cvCreateMat(1, N, CV_32F);
    CvMat *yr = cvCreateMat(1,N,CV_32F);
    CvMat *xc = cvCreateMat(1,N,CV_32F);
    /* blank_aa holds the sample coordinates -N/2+1 .. N/2 (N values). */
    for(ll = 0; ll < N; ll++){
        float elements= N*(-1.0)/2.0 + ll*1.0 + 1.0;
        cvmSet(blank_aa, 0, ll, elements);
    }
    /* matlab : for loop */
    int nn=0;
    for(ll = 0; ll < L; ll++){
        /* Near-vertical band of angles: rasterize x as a function of y
           using the cotangent. */
        if(cvmGet(thc,0,ll)>= pi/3 && cvmGet(thc,0,ll)<= (pi/2+pi/18) || cvmGet(thc,0,ll) >=(2*pi/3-pi/18) && cvmGet(thc,0,ll)<= 2*pi/3){
            /* matlab : xc = round(cot(thc(ll))*(-N/2+1:N/2-1))+N/2+1; */
            float element_B = cvmGet(thc,0,ll);
            element_B = tan(element_B);
            //cotangent(0)..  guard against division by zero below
            if(element_B == 0)
                element_B = 0.1;
            element_B = 1.0/element_B;
            for(i=0; i<blank_aa->cols-1; i++){
                int element_C = round_ban(element_B * cvmGet(blank_aa,0, i))+N/2+1;
                cvmSet(xc,0,i,element_C);
            }
            for(nn = 0; nn<N-1; nn++){
                /* matlab : M(nn+1,xc(nn)) = 1;*/
                int elements_A = cvmGet(xc,0,nn);
                cvmSet(M,nn+1,elements_A,1.0);
            }
        }//if
        else{
            /* Near-horizontal angles: rasterize y as a function of x (tan). */
            if(cvmGet(thc,0,ll) <= pi/4 || cvmGet(thc,0,ll)>3*pi/4){
                /* matlab : yr = round(tan(thc(ll))*(-N/2+1:N/2-1))+N/2+1; */
                float element_B = cvmGet(thc,0,ll);
                element_B = tan(element_B);
                for(i=0; i<blank_aa->cols-1; i++){
                    int element_C = round_ban(element_B * cvmGet(blank_aa,0, i))+N/2+1;
                    cvmSet(yr,0,i,element_C);
                }
                for(nn = 0; nn<N-1; nn++){
                    /* matlab : M(yr(nn), nn+1) = 1;*/
                    int elements_A = cvmGet(yr,0,nn);
                    cvmSet(M,elements_A,nn+1,1.0);
                }
            }
            else{
                /* Remaining angles: same cotangent rasterization as the
                   first branch. */
                /* matlab : xc = round(cot(thc(ll))*(-N/2+1:N/2-1))+N/2+1; */
                float element_B = cvmGet(thc,0,ll);
                element_B = tan(element_B);
                //cotangent(0)..
                if(element_B == 0)
                    element_B = 0.1;
                element_B = 1.0/element_B;
                for(i=0; i<blank_aa->cols-1; i++){
                    int element_C = round_ban(element_B * cvmGet(blank_aa,0, i))+N/2+1;
                    cvmSet(xc,0,i,element_C);
                }
                for(nn = 0; nn<N-1; nn++){
                    /* matlab : M(nn+1,xc(nn)) = 1;*/
                    int elements_A = cvmGet(xc,0,nn);
                    cvmSet(M,nn+1,elements_A,1.0);
                }
            }
        }//else
    }//for

    //upper half plane mask
    Mh = cvCreateMat(N,N,CV_32FC1);   /* NOTE(review): local re-bind; leaks */
    cvmSetZero(Mh);
    cvmCopy(M,Mh);
    /* matlab : Mh =(N/2+2:N,:) = 0; */
    int r,c;
    for(c = 0; c<N; c++){
        for(r = N/2+2; r<N; r++){
            cvmSet(Mh, r, c, 0);
        }
    }
    /* matlab : Mh =(N/2+1,N/2+1:N) = 0; */
    for(c = N/2; c<N; c++){
        cvmSet(Mh, N/2+1, c, 0);
    }
    /* matlab : mMH = find(M)*/
    /* First pass: count the nonzero entries of M. */
    int count = 0;
    for(c = 0; c< M->cols; c++){
        for(r = 0; r< M->rows; r++){
            if(cvmGet(M,r,c) != 0){
                count=count+1;
            }
        }
    }
    int a_blank= count+1;
    mi = cvCreateMat(a_blank,1,CV_32F);
    /* Second pass: copy the nonzero values into mi (column-major order). */
    count = 0;
    for(c = 0; c< M->cols; c++){
        for(r = 0; r< M->rows; r++){
            if(cvmGet(M,r,c) != 0){
                float elements = cvmGet(M,r,c);
                cvmSet(mi,count,0, elements);
                //find the nonzero elements in matrix
                count=count+1;
            }
        }
    }
    /* matlab : mMH = find(M)*/
    /* Same two-pass extraction for the half-plane mask Mh. */
    count = 0;
    for(c = 0; c< Mh->cols; c++){
        for(r = 0; r< Mh->rows; r++){
            if(cvmGet(Mh,r,c) != 0){
                count=count+1;
            }
        }
    }
    a_blank= count;
    mhi = cvCreateMat(a_blank+1,1,CV_32F);
    count = 0;
    for(c = 0; c< Mh->cols; c++){
        for(r = 0; r< Mh->rows; r++){
            if(cvmGet(Mh,r,c) != 0){
                float elements = cvmGet(Mh,r,c);
                cvmSet(mhi,count, 0, elements);
                //find the nonzero elements in matrix
                count=count+1;
            }
        }
    }
    cvReleaseMat(&thc);
    cvReleaseMat(&xc);
    cvReleaseMat(&yr);
    cvReleaseMat(&blank_aa);
}
/* For now this function works badly with singular cases: as the code shows,
   when trouble occurs with the matrices or some variables, a box filled with
   zero values is returned.  In general, however, the function works fine. */
/*
 * Fits an ellipse to a point sequence (CvPoint or CvPoint2D32f elements)
 * by solving the algebraic conic-fitting problem with an ellipse constraint
 * (4ac - b^2 = 1), and converts the conic coefficients into a CvBox2D
 * (center, axis lengths, rotation angle).
 */
static void
icvFitEllipse_F( CvSeq* points, CvBox2D* box )
{
    CvMat* D = 0;

    CV_FUNCNAME( "icvFitEllipse_F" );

    __BEGIN__;

    double S[36], C[36], T[36];
    int i, j;
    double eigenvalues[6], eigenvectors[36];
    double a, b, c, d, e, f;
    double x0, y0, idet, scale, offx = 0, offy = 0;

    int n = points->total;
    CvSeqReader reader;
    int is_float = CV_SEQ_ELTYPE(points) == CV_32FC2;

    CvMat _S = cvMat(6,6,CV_64F,S), _C = cvMat(6,6,CV_64F,C), _T = cvMat(6,6,CV_64F,T);
    CvMat _EIGVECS = cvMat(6,6,CV_64F,eigenvectors), _EIGVALS = cvMat(6,1,CV_64F,eigenvalues);

    /* create matrix D of input points */
    CV_CALL( D = cvCreateMat( n, 6, CV_64F ));

    cvStartReadSeq( points, &reader );

    /* shift all points to zero (compute the centroid for conditioning) */
    for( i = 0; i < n; i++ )
    {
        if( !is_float )
        {
            offx += ((CvPoint*)reader.ptr)->x;
            offy += ((CvPoint*)reader.ptr)->y;
        }
        else
        {
            offx += ((CvPoint2D32f*)reader.ptr)->x;
            offy += ((CvPoint2D32f*)reader.ptr)->y;
        }
        CV_NEXT_SEQ_ELEM( points->elem_size, reader );
    }

    offx /= n;
    offy /= n;

    // fill matrix rows as (x*x, x*y, y*y, x, y, 1 )
    for( i = 0; i < n; i++ )
    {
        double x, y;
        double* Dptr = D->data.db + i*6;

        if( !is_float )
        {
            x = ((CvPoint*)reader.ptr)->x - offx;
            y = ((CvPoint*)reader.ptr)->y - offy;
        }
        else
        {
            x = ((CvPoint2D32f*)reader.ptr)->x - offx;
            y = ((CvPoint2D32f*)reader.ptr)->y - offy;
        }
        CV_NEXT_SEQ_ELEM( points->elem_size, reader );

        Dptr[0] = x * x;
        Dptr[1] = x * y;
        Dptr[2] = y * y;
        Dptr[3] = x;
        Dptr[4] = y;
        Dptr[5] = 1.;
    }

    // S = D^t*D  (scatter matrix of the design matrix)
    cvMulTransposed( D, &_S, 1 );
    cvSVD( &_S, &_EIGVALS, &_EIGVECS, 0, CV_SVD_MODIFY_A + CV_SVD_U_T );

    /* Whiten: scale each eigenvector by lambda^(-1/4) so that the product
       below forms an (approximate) inverse square root of S. */
    for( i = 0; i < 6; i++ )
    {
        double a = eigenvalues[i];
        a = a < DBL_EPSILON ? 0 : 1./sqrt(sqrt(a));
        for( j = 0; j < 6; j++ )
            eigenvectors[i*6 + j] *= a;
    }

    // C = Q^-1 = transp(INVEIGV) * INVEIGV
    cvMulTransposed( &_EIGVECS, &_C, 1 );

    /* Ellipse constraint matrix: encodes 4ac - b^2 = 1. */
    cvZero( &_S );
    S[2] = 2.;
    S[7] = -1.;
    S[12] = 2.;

    // S = Q^-1*S*Q^-1
    cvMatMul( &_C, &_S, &_T );
    cvMatMul( &_T, &_C, &_S );

    // and find its eigenvalues and vectors too
    //cvSVD( &_S, &_EIGVALS, &_EIGVECS, 0, CV_SVD_MODIFY_A + CV_SVD_U_T );
    cvEigenVV( &_S, &_EIGVECS, &_EIGVALS, 0 );

    /* Pick the first positive eigenvalue; if none among the top three,
       the fit is degenerate -- return a zeroed box. */
    for( i = 0; i < 3; i++ )
        if( eigenvalues[i] > 0 )
            break;

    if( i >= 3 /*eigenvalues[0] < DBL_EPSILON*/ )
    {
        box->center.x = box->center.y = box->size.width = box->size.height = box->angle = 0.f;
        EXIT;
    }

    // now find truthful eigenvector
    _EIGVECS = cvMat( 6, 1, CV_64F, eigenvectors + 6*i );
    _T = cvMat( 6, 1, CV_64F, T );

    // Q^-1*eigenvecs[0]  (undo the whitening transform)
    cvMatMul( &_C, &_EIGVECS, &_T );

    // extract vector components (conic: a*x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0)
    a = T[0]; b = T[1]; c = T[2]; d = T[3]; e = T[4]; f = T[5];

    ///////////////// extract ellipse axes from above values ////////////////

    /* 1) find center of ellipse
       it satisfy equation
       | a     b/2 | *  | x0 | +  | d/2 | = |0 |
       | b/2    c  |    | y0 |    | e/2 |   |0 |
     */
    idet = a * c - b * b * 0.25;
    idet = idet > DBL_EPSILON ? 1./idet : 0;

    // we must normalize (a b c d e f ) to fit (4ac-b^2=1)
    scale = sqrt( 0.25 * idet );

    if( scale < DBL_EPSILON )
    {
        /* Degenerate conic -- report the centroid with a zero-sized box. */
        box->center.x = (float)offx;
        box->center.y = (float)offy;
        box->size.width = box->size.height = box->angle = 0.f;
        EXIT;
    }

    a *= scale;
    b *= scale;
    c *= scale;
    d *= scale;
    e *= scale;
    f *= scale;

    x0 = (-d * c + e * b * 0.5) * 2.;
    y0 = (-a * e + d * b * 0.5) * 2.;

    // recover center (undo the initial centroid shift)
    box->center.x = (float)(x0 + offx);
    box->center.y = (float)(y0 + offy);

    // offset ellipse to (x0,y0)
    // new f == F(x0,y0)
    f += a * x0 * x0 + b * x0 * y0 + c * y0 * y0 + d * x0 + e * y0;

    if( fabs(f) < DBL_EPSILON )
    {
        box->size.width = box->size.height = box->angle = 0.f;
        EXIT;
    }

    scale = -1. / f;

    // normalize to f = 1
    a *= scale;
    b *= scale;
    c *= scale;

    // extract axis of ellipse
    // one more eigenvalue operation on the 2x2 quadratic-form matrix
    S[0] = a;
    S[1] = S[2] = b * 0.5;
    S[3] = c;

    _S = cvMat( 2, 2, CV_64F, S );
    _EIGVECS = cvMat( 2, 2, CV_64F, eigenvectors );
    _EIGVALS = cvMat( 1, 2, CV_64F, eigenvalues );
    cvSVD( &_S, &_EIGVALS, &_EIGVECS, 0, CV_SVD_MODIFY_A + CV_SVD_U_T );

    // extract axis length from eigenvalues
    box->size.width = (float)(2./sqrt(eigenvalues[0]));
    box->size.height = (float)(2./sqrt(eigenvalues[1]));

    // calc angle
    box->angle = (float)(180 - atan2(eigenvectors[2], eigenvectors[3])*180/CV_PI);

    __END__;

    cvReleaseMat( &D );
}
/*
 * Multi-scale sliding-window scan: for 7 scale layers, slides a 32*scale
 * window over the image in steps of 8 px, builds a bag-of-words histogram
 * from the SURF keypoints inside the window, classifies it with the boosted
 * trees, and records detections in <newObjects>; afterwards keeps growing a
 * preferred (lowest-score) detection to the largest overlapping candidate.
 *
 * NOTE(review): this block contains an UNRESOLVED MERGE-CONFLICT marker
 * ("<<<<<<< HEAD:vision/classifier.cpp"), an undeclared variable <where>,
 * and the function body is never closed in this chunk (no return value for
 * a bool function either) -- the other side of the conflict appears to have
 * been lost.  The conflict must be resolved against version control before
 * this file can compile; left byte-identical here.
 */
bool Classifier::run_boxscan(IplImage *dst, vector<int> &cluster, vector<CvSURFPoint> &keypts, vector<float *> &pts, vector<FoundObject> &newObjects, const CObjectList *oldObjects)
{
    float scale = 2.0f;
    int maxWidth = dst->width;
    int maxHeight = dst->height;

    int numLayers = 7;
    while(numLayers-- > 0) {
        for (int x = 0; x < maxWidth - 32*scale; x += 8) {
            for (int y = 0; y < maxHeight - 32*scale; y += 8) {
                // Collect indices of keypoints inside the current window.
                vector<int> newpts;
                for (int i = 0; i < (int)pts.size(); ++i) {
                    if (keypts[i].pt.x >= x && keypts[i].pt.x < x + 32*scale && keypts[i].pt.y >= y && keypts[i].pt.y < y + 32*scale)
                        newpts.push_back(i);
                }
                // Skip windows with too few interest points to classify.
                if (newpts.size() < MIN_IPOINTS)
                    continue;

                // Bag-of-words histogram over the cluster ids of the
                // window's keypoints.
                // NOTE(review): <query> is never released -- leaks one CvMat
                // per accepted window.
                CvMat *query = cvCreateMat(1, num_clusters, CV_32FC1);
                cvSet(query, cvScalar(0));
                for (int row = 0; row < (int)newpts.size(); ++row) {
                    int idx = newpts[row];
                    int cluster_idx = cluster[idx];
                    int oldVal = cvGetReal2D(query, 0, cluster_idx);
                    cvSetReal2D(query, 0, cluster_idx, oldVal+1);
                    //cout << row << " " << idx << " " << cluster_idx << " " << oldVal << endl;
                }

                float scores[kNumObjectTypes];
                int klass = btrees.predict(query, scores);
                int min_score = -3;
                // cout << "I think it's a ";
                // NOTE(review): the loop variable below shadows the predicted
                // <klass> above, and float scores are truncated into the int
                // <min_score> (initialized to -3) -- confirm both are intended.
                for (int klass = 0; klass < kNumObjectTypes - 1; ++klass) {
                    // cout << classIntToString(klass) << " (" << scores[klass] << ") ";
                    min_score = (scores[klass] < min_score) ? scores[klass] : min_score;
                }
                // cout << endl;

                if (klass != kOther) {
                    // Record the detection, scaled back to image coordinates.
                    FoundObject fo;
                    CObject o;
                    o.rect = cvRect(x * scale, y * scale, 48*scale, 48*scale);
                    o.label = classIntToString(klass);
                    fo.object = o;
                    fo.score = min_score * scale;
                    newObjects.push_back(fo);
                }
                //
                // CObject o;
                // o.rect = cvRect(x, y, 32*scale, 32*scale);
                // o.label = classIntToString(klass);
                // showRect(dst, &o, &keypts);
            }
        }
        scale *= 1.1;
    }

    // Possibly add found objects
    vector<int> toRemove;
    float minScore = 100.0f;
    FoundObject minScoreObject; // prefer lower-scoring
    for (int i = 0; i < (int)newObjects.size(); ++i) {
        if (newObjects[i].score < minScore) {
            minScore = newObjects[i].score;
            minScoreObject = newObjects[i];
        }
    }

    // find larger objects: repeatedly replace the preferred detection with
    // any strictly larger candidate that overlaps it by more than 80%.
    bool foundbigger = true;
    while(foundbigger) {
        foundbigger = false;
        for (int i = (int)newObjects.size() - 1; i >= 0; --i) {
            if (newObjects[i].object.overlap(minScoreObject.object) > minScoreObject.object.area() * 0.8 && newObjects[i].object.area() > minScoreObject.object.area()) {
                foundbigger = true;
                minScoreObject = newObjects[i];
            }
<<<<<<< HEAD:vision/classifier.cpp
            ++where;
        }
    }
    // NOTE(review): function body is still open here and no value is
    // returned -- the remainder was lost in the unresolved merge above.
void pkmGaussianMixtureModel::modelData(int minComponents, int maxComponents, double regularizingFactor, double stoppingThreshold) { // indicator will contain the assignments of each data point to // the mixture components, as result of the E-step // double * indicator = new double[k * m_nObservations]; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // // Use as an initial approxiamation, a diagonal covariance matrix // taken from the mean covariances // Could instead use K-Means (see opencv function, kmeans2) // // Alternatively, the algorithm may start with M-step when // initial values for pi,k can be provided. Another alternative, // when pi,k are unknown, is to use a simpler clustering algorithm // to pre-cluster the input samples and thus obtain initial pi,k. // Often (and in ML) k-means algorithm is used for that purpose. // // One of the main that EM algorithm should deal with is the large // number of parameters to estimate. The majority of the parameters // sits in covariation matrices, which are d×d elements each // (where d is the feature space dimensionality). However, in many // practical problems the covariation matrices are close to diagonal, // or even to μk*I, where I is identity matrix and μk is // mixture-dependent "scale" parameter. So a robust computation // scheme could be to start with the harder constraints on the // covariation matrices and then use the estimated parameters as an // input for a less constrained optimization problem (often a // diagonal covariation matrix is already a good enough approximation). // // References: // // 1. [Bilmes98] J. A. Bilmes. A Gentle Tutorial of the EM Algorithm // and its Application to Parameter Estimation for Gaussian Mixture // and Hidden Markov Models. Technical Report TR-97-021, // International Computer Science Institute and Computer Science // Division, University of California at Berkeley, April 1998. 
//// This code is for indexing (observations x variables) emModel = new CvEM[maxComponents-minComponents+1]; //////////////////////////////////////////////////////////// // EM int i; double minBIC = HUGE_VAL; if(maxComponents >= m_nObservations) { maxComponents = m_nObservations-1; } if(minComponents > maxComponents) { minComponents = maxComponents = m_nObservations-1; } for (int k = minComponents; k <= maxComponents; k++) { #if 0 ////////////////////////////////////////////////////////////// // Create a list of random indexes from 1 : K // from the permutations of the number of observations int * randIndex = new int[m_nObservations]; // 1:N for (i = 0; i < m_nObservations; i++) randIndex[i] = i; // Shuffle the array for (i = 0; i < (m_nObservations-1); i++) { // Random position int r = i + (rand() % (m_nObservations-i)); // Swap int temp = randIndex[i]; randIndex[i] = randIndex[r]; randIndex[r] = temp; } ////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // Random initial kernels float * estMU = new float[k*m_nVariables]; for( int row = 0; row < k; row++ ) { int ind = randIndex[row]; for( int col = 0; col < m_nVariables; col++ ) { // Get each variable at index ind (of the random kernels) // from the input data into estMu estMU[row*m_nVariables+col] = ((float*)(m_pCvData->data.ptr + m_pCvData->step*ind))[col]; } } CvMat param_mean; cvInitMatHeader(¶m_mean, k, m_nVariables, CV_32FC1, estMU); //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // Calculate the Covariance matrix (assume this is a 2x2 Matrix) CvMat *m_pCvCov = cvCreateMat(m_nVariables, m_nVariables, CV_32FC1); CvMat *m_pCvMu = cvCreateMat(m_nVariables, 1, CV_32FC1); CvMat **dat = (CvMat**)cvAlloc( m_nObservations * sizeof(*dat) ); for (i = 0; i < m_nObservations; i++) { CvMat *tempData = cvCreateMat(m_nVariables, 1, CV_32FC1); CV_MAT_ELEM(*tempData, float, 0, 0) 
= CV_MAT_ELEM(*m_pCvData, float, i, 0); CV_MAT_ELEM(*tempData, float, 1, 0) = CV_MAT_ELEM(*m_pCvData, float, i, 1); dat[i] = tempData; } cvCalcCovarMatrix((const CvArr**)dat, m_nObservations, m_pCvCov, m_pCvMu, CV_COVAR_NORMAL); //|CV_COVAR_SCALE); // Store k (all axes) Matrices of Diagonal Covariance Matrices // initialized to 1/10th of the max of the diag values // of the mean variance as the estimated covariances CvMat **param_cov = (CvMat**)cvAlloc( k * sizeof(*param_cov) ); float covMax = MAX(CV_MAT_ELEM(*m_pCvCov, float, 0, 0), CV_MAT_ELEM(*m_pCvCov, float, 1, 1)) / 10.; for (int kern = 0; kern < k; kern++) { CvMat *tempData = cvCreateMat(m_nVariables, m_nVariables, CV_32FC1); CV_MAT_ELEM(*tempData, float, 0, 0) = covMax; CV_MAT_ELEM(*tempData, float, 0, 1) = 0.0f; CV_MAT_ELEM(*tempData, float, 1, 0) = 0.0f; CV_MAT_ELEM(*tempData, float, 1, 1) = covMax; param_cov[kern] = tempData; } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // Random mixing probabilities for each kernel float * estPP = new float[k]; for (i = 0; i < k; i++) { estPP[i] = 1.0/(float)k; } // Weights for each kernel CvMat param_weight; cvInitMatHeader(¶m_weight, k, 1, CV_32FC1, estPP); //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// float *estProb = new float[k*m_nObservations]; for (i = 0; i < k; i++) { for(int j = 0; j < m_nObservations; j++) { estProb[i*j] = estPP[i] / 2.0; } } // Create a Cv Matrix for the mix prob CvMat param_prob; cvInitMatHeader(¶m_prob, m_nObservations, k, CV_32FC1, estProb); //////////////////////////////////////////////////////////// // Initialize parameters CvEMParams emParam; emParam.covs = (const CvMat **)param_cov; emParam.means = ¶m_mean; emParam.weights = ¶m_weight; emParam.probs = NULL;//¶m_prob; emParam.nclusters = k+1; emParam.cov_mat_type = 
CvEM::COV_MAT_GENERIC;//CvEM::COV_MAT_DIAGONAL;////CvEM::COV_MAT_SPHERICAL; emParam.start_step = CvEM::START_E_STEP; //CvEM::START_AUTO_STEP; // initialize with k-means emParam.term_crit.epsilon = 0.00001; emParam.term_crit.max_iter = 50; emParam.term_crit.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS; // Train emModel[k-minComponents].train(m_pCvData, 0, emParam, 0); double thisLikelihood = emModel[k-minComponents].get_log_likelihood(); //double BIC = -2.*thisLikelihood - (double)k*log((double)m_nObservations*10); double BIC = -m_nObservations*thisLikelihood + k/2.*log((double)m_nObservations); printf("K: %d, BIC: %f\n", k, BIC); if (BIC < minLikelihood) { bestModel = k-minComponents; minLikelihood = BIC; } delete [] randIndex; delete [] estMU; delete [] estPP; #else CvEMParams emParam; emParam.covs = NULL; emParam.means = NULL; emParam.weights = NULL; emParam.probs = NULL; emParam.nclusters = k; emParam.cov_mat_type = m_covType;//CvEM::COV_MAT_SPHERICAL;//CvEM::COV_MAT_DIAGONAL;////CvEM::COV_MAT_GENERIC;//; emParam.start_step = CvEM::START_AUTO_STEP; //CvEM::START_AUTO_STEP; // initialize with k-means emParam.term_crit.epsilon = 0.01; emParam.term_crit.max_iter = 100; emParam.term_crit.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS; // Train emModel[k-minComponents].train(m_pCvData, 0, emParam, 0); // Calculate the log likelihood of the model const CvMat *weights = emModel[k-minComponents].get_weights(); const CvMat *probs = emModel[k-minComponents].get_probs(); const CvMat **modelCovs = emModel[k-minComponents].get_covs(); const CvMat *modelMus = emModel[k-minComponents].get_means(); const CvMat *modelWeights = emModel[k-minComponents].get_weights(); double thisLikelihood; if(k == 1) // mlem.cpp does not calculate the log_likelihood for 1 cluster // (why i have no idea?! it sets log_likelihood = DBL_MAX/1000.;!?) // so i compute it here. though this seems to pair up with the // same value you get for 2 kernels, it does not pair up for // anything higher? 
{ double _log_likelihood = 0;//-CV_LOG2PI * (double)m_nObservations * (double)m_nVariables / 2.; CvMat *pts = cvCreateMat(m_nVariables, 1, CV_64FC1); CvMat *mean = cvCreateMat(m_nVariables, 1, CV_64FC1); for( int n = 0; n < m_nObservations; n++ ) { double sum = 0; cvmSet(pts, 0, 0, cvmGet(m_pCvData, n, 0)); cvmSet(pts, 1, 0, cvmGet(m_pCvData, n, 1)); double* pp = (double*)(probs->data.ptr + probs->step*n); for( int d = 0; d < k; d++ ) { const CvMat * covar = modelCovs[d]; cvmSet(mean, 0, 0, cvmGet(modelMus, d, 0)); cvmSet(mean, 1, 0, cvmGet(modelMus, d, 1)); double p_x = multinormalDistribution(pts, mean, covar); double w_k = cvmGet(weights, 0, d); sum += p_x * w_k;// * pp[d]; //printf("%f + %f += %f\n", p_x, w_k, sum); } _log_likelihood -= log(sum); } thisLikelihood = -_log_likelihood;//emModel[k-minComponents].get_log_likelihood(); } else { thisLikelihood = emModel[k-minComponents].get_log_likelihood(); } // Calculate the Bit Information Criterion for Model Selection double vars = (double)m_nVariables; double N_p = ((double)k-1.)+(double)k*(vars + vars*(vars+1.)/2.); double BIC = -2.*thisLikelihood + N_p*log((double)m_nObservations); //printf("K: %d, like: %f, BIC: %f\n", k, thisLikelihood, BIC); if (BIC < minBIC) { // update variables with the best bic and best model subscript bestModel = k-minComponents; minBIC = BIC; // store the bic and likelihood for printing later m_BIC = BIC; m_Likelihood = thisLikelihood; } #endif } bModeled = true; // m_pCvProb = emModel.get_probs; }
/*
 * Exercise driver: loads two grayscale images, demonstrates a pure
 * translation homography, then stitches the two images into a panorama
 * using a perspective transform from four point correspondences.
 * (Original task comments were in German; translated below.)
 */
int main(int argc, char *argv[])
{
    if (argc < 3){
        printf("Usage: %s <image-file-name1> <image-file-name2>\n", argv[0]);
        exit(1);
    }

    // Load both input images as grayscale and convert to float in [0,1].
    IplImage* img1 = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
    if (!img1) {
        printf("Could not load image file: %s\n", argv[1]);
        exit(1);
    }
    IplImage* img1f = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
    cvConvertScale(img1, img1f, 1.0 / 255.0);

    IplImage* img2 = cvLoadImage(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
    if (!img2) {
        printf("Could not load image file: %s\n", argv[2]);
        exit(1);
    }
    // NOTE(review): img2f is sized from img1, not img2 -- correct only when
    // both inputs have identical dimensions; confirm.
    IplImage* img2f = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
    cvConvertScale(img2, img2f, 1.0 / 255.0);

    /**
     * Exercise: Homographies (5 points)
     *
     * Assuming images are taken with a distortion-free pinhole camera,
     * views with different image planes but the same projection center are
     * related by projective mappings, so-called homographies.
     *
     * - Write down a translation as a homography (on paper!).
     * - Shift the image plane of a test image 20 pixels to the right without
     *   changing the projection center. Use cvWarpPerspective for this.
     * - How many point correspondences are at least required to determine a
     *   projective mapping between two images uniquely up to scale? Why?
     *   (Answer in writing!)
     */
    /* TODO */
    IplImage* img_moved = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
    cv::Mat matImg1f_task1 = cv::Mat(img1f);
    cv::Mat matImgMoved = cv::Mat(img_moved);
    // 3x3 translation homography (tx = -20).
    float data[] = { 1, 0, -20, 0, 1, 0, 0, 0, 1 };
    cv::Mat trans(3, 3, CV_32FC1, data);;
    cv::warpPerspective(matImg1f_task1, matImgMoved, trans, matImgMoved.size());
    cv::namedWindow("mainWin", CV_WINDOW_AUTOSIZE);
    cv::Mat img_moved_final(img_moved);
    cv::imshow("mainWin", img_moved_final);
    cvWaitKey(0);

    /**
     * Exercise: Panorama (15 points)
     *
     * The goal of this exercise is to construct a panorama from two given
     * images.
     * \begin{center}
     * \includegraphics[width = 0.3\linewidth]{left.png}
     * \includegraphics[width = 0.3\linewidth]{right.png}
     * \end{center}
     *
     * First, a perspective transformation has to be determined from the
     * given point correspondences
     * \begin{center}
     * \begin{tabular}{|c|c|}
     * \hline
     * left image & right image \\
     * $(x, y)$ & $(x, y)$ \\ \hline \hline
     * (463, 164) & (225, 179)\\ \hline
     * (530, 357) & (294, 370)\\ \hline
     * (618, 357) &(379, 367)\\ \hline
     * (610, 153) & (369, 168)\\ \hline
     * \end{tabular}
     * \end{center}
     * with which both images can be transformed onto a common image plane.
     *
     * - Compute the transformation from the given point correspondences.
     *   Use the function cvGetPerspectiveTransform. What is the central
     *   idea of the DLT algorithm as presented in the lecture?
     */
    /* TODO */
    CvMat *P = cvCreateMat(3, 3, CV_32FC1);
    CvPoint points1[] = { cvPoint(463, 164), cvPoint(530, 357), cvPoint(618, 357), cvPoint(610, 153) };
    CvPoint points2[] = { cvPoint(225, 179), cvPoint(294, 370), cvPoint(379, 367), cvPoint(369, 168) };
    CvPoint2D32f pt1[4], pt2[4];
    for (int i = 0; i < 4; ++i) {
        pt2[i].x = points2[i].x;
        pt2[i].y = points2[i].y;
        pt1[i].x = points1[i].x;
        pt1[i].y = points1[i].y;
    }
    // P maps left-image points onto the right image's plane.
    cvGetPerspectiveTransform(pt1, pt2, P);

    /**
     * - Determine the required image size for the panorama image.
     */
    /* TODO */
    // Map the four corners of the left image through P to find the extent.
    // NOTE(review): P is a CvMat*; "P * cv::Mat(...)" multiplies a pointer
    // by a matrix and should not compile as-is -- the later code uses
    // cv::Mat(P) for the same purpose; confirm how this built.
    int h = img1f->height - 1;
    int w = img1f->width - 1;
    float p1[] = { 0.0, 0.0, 1.0 };
    float p2[] = { 0.0, (float)(h), 1.0 };
    float p3[] = { (float)(w), (float)(h), 1.0 };
    float p4[] = { (float)(w), 0.0, 1.0 };
    cv::Mat P1 = P * cv::Mat(3, 1, CV_32FC1, p1);
    cv::Mat P2 = P * cv::Mat(3, 1, CV_32FC1, p2);
    cv::Mat P3 = P * cv::Mat(3, 1, CV_32FC1, p3);
    cv::Mat P4 = P * cv::Mat(3, 1, CV_32FC1, p4);
    // mustn't be zero
    assert(P1.at<float>(2,0) != 0 && P2.at<float>(2,0) != 0 && P3.at<float>(2,0) != 0 && P4.at<float>(2,0) != 0);
    // Dehomogenize the warped corners.
    P1 = P1 / P1.at<float>(2,0);
    P2 = P2 / P2.at<float>(2,0);
    P3 = P3 / P3.at<float>(2,0);
    P4 = P4 / P4.at<float>(2,0);

    /**
     * - Project the left image into the image plane of the right image.
     *   Take care that the left image border is also projected into the
     *   panorama image.
     */
    /* TODO */
    ///////// There is probably an error somewhere around here with the size...
    std::vector<cv::Mat*> matrices;
    matrices.push_back(&P1);
    matrices.push_back(&P2);
    matrices.push_back(&P3);
    matrices.push_back(&P4);
    // Bounding box of the warped corners, expanded to include the
    // untransformed right image.
    cv::Point minP(P1.at<float>(0,0), P1.at<float>(1,0)), maxP(P1.at<float>(0,0), P1.at<float>(1,0));
    for(int i = 0; i < matrices.size(); ++i) {
        minP.x = (int)(min(matrices[i]->at<float>(0,0), (float)minP.x));
        minP.y = (int)(min(matrices[i]->at<float>(1,0), (float)minP.y));
        maxP.x = (int)(max(matrices[i]->at<float>(0,0), (float)maxP.x)+1.0);
        maxP.y = (int)(max(matrices[i]->at<float>(1,0), (float)maxP.y)+1.0);
    }
    minP.x = min(minP.x, 0);
    minP.y = min(minP.y, 0);
    maxP.x = max(maxP.x, img1f->width-1);
    maxP.y = max(maxP.y, img1f->height-1);

    // create image buffers of the panorama size
    cv::Mat Panorama = cv::Mat(cv::Size(maxP-minP), CV_32FC1, cv::Scalar(0.0));
    cv::Mat PLeft = cv::Mat(cv::Size(maxP-minP), CV_32FC1, cv::Scalar(0.0));
    cv::Mat PRight = cv::Mat(cv::Size(maxP-minP), CV_32FC1, cv::Scalar(0.0));
    cv::Mat matImg1f = cv::Mat( img1f);
    cv::Mat matImg2f = cv::Mat( img2f);
    // Copy the source images into the top-left corner of the big buffers.
    for(int y=0; y < matImg1f.rows; ++y ) {
        for(int x=0; x < matImg1f.cols; ++x ) {
            PLeft.at<float>(y,x) = matImg1f.at<float>(y,x);
        }
    }
    for(int y=0; y < matImg2f.rows; ++y ) {
        for(int x=0; x < matImg2f.cols; ++x ) {
            PRight.at<float>(y,x) = matImg2f.at<float>(y,x);
        }
    }
    cv::imshow("mainWin", PLeft);
    cv::waitKey(0);
    cv::imshow("mainWin", PRight);
    cv::waitKey(0);

    // Shift everything so the panorama origin is at (minP.x, minP.y).
    float trans2[] = { 1.0, 0.0, -minP.x, 0.0, 1.0, -minP.y, 0.0, 0.0, 1.0};
    cv::Mat translation(3,3,CV_32FC1,trans2);
    //translate P
    cv::Mat Pnew = translation*cv::Mat(P);
    cv::warpPerspective(PLeft, Panorama, Pnew, Panorama.size());
    cv::warpPerspective(PRight, PLeft, translation, PLeft.size());
    PRight = PLeft.clone();
    cv::imshow("mainWin", PLeft);
    cv::waitKey(0);
    cv::imshow("mainWin", Panorama);
    cv::waitKey(0);

    /**
     * - Build the panorama image so that pixels for which two values are
     *   available are assigned the mean of both.
     */
    // Where both images contribute, halve the summed value (mask holds 0.5
    // on the overlap, 0 elsewhere; weighted = 1 - mask).
    cv::Mat mask = (Panorama > 0.0) & (PLeft > 0.0);
    cv::imshow("mainWin", mask);
    cv::waitKey(0);
    mask.convertTo(mask,CV_32FC1, 0.5/255.);
    cv::Mat weighted = cv::Mat(Panorama.size(), CV_32FC1, cv::Scalar(1.0)) - mask;
    Panorama = Panorama + PLeft;
    cv::multiply(Panorama, weighted, Panorama);
    cv::imshow("mainWin", Panorama);
    cv::waitKey(0);
    /* TODO */

    /**
     * - Display the panorama image.
     */
    /* TODO */
}
/* Allocates a CvMat equivalent of an image with the given size, IplImage
   depth code and channel count: the IPL depth is translated to the
   corresponding CvMat depth and combined with the channel count into the
   matrix element type. */
static CvMat * cvCreateImageMat( CvSize size, int depth, int channels )
{
    const int cv_depth = icvIplToCvDepth(depth);
    const int mat_type = CV_MAKE_TYPE(cv_depth, channels);
    return cvCreateMat( size.height, size.width, mat_type );
}
/* Optimization using Levenberg-Marquardt */
/*
 * Minimizes || observRes - function(X) || over X, starting from X0, using
 * the Levenberg-Marquardt damping scheme (dense, non-sparse variant).
 *
 * JacobianFunction - callback computing the Jacobian of <function> at X.
 * function         - callback computing the model response for a given X.
 * X0               - initial parameter vector (numVal x 1).
 * observRes        - observed response vector (numFunc x 1).
 * resultX          - output: optimized parameter vector (numVal x 1).
 * maxIter          - maximum number of (inner) iterations, must be > 0.
 * epsilon          - relative-change stopping threshold, must be >= 0.
 */
void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
                                    pointer_LMFunc function,
                                    /*pointer_Err error_function,*/
                                    CvMat *X0,CvMat *observRes,CvMat *resultX,
                                    int maxIter,double epsilon)
{
    /* This is not sparce method */
    /* Make optimization using  */
    /* func - function to compute */
    /* uses function to compute jacobian */

    /* Allocate memory */
    CvMat *vectX = 0;
    CvMat *vectNewX = 0;
    CvMat *resFunc = 0;
    CvMat *resNewFunc = 0;
    CvMat *error = 0;
    CvMat *errorNew = 0;
    CvMat *Jac = 0;
    CvMat *delta = 0;
    CvMat *matrJtJ = 0;
    CvMat *matrJtJN = 0;
    CvMat *matrJt = 0;
    CvMat *vectB = 0;

    /* NOTE(review): "Levenbegr" typo in the function-name string below is
       preserved (it is a runtime string). */
    CV_FUNCNAME( "cvLevenbegrMarquardtOptimization" );
    __BEGIN__;

    if( JacobianFunction == 0 || function == 0 || X0 == 0 || observRes == 0 || resultX == 0 )
    {
        CV_ERROR( CV_StsNullPtr, "Some of parameters is a NULL pointer" );
    }

    if( !CV_IS_MAT(X0) || !CV_IS_MAT(observRes) || !CV_IS_MAT(resultX) )
    {
        CV_ERROR( CV_StsUnsupportedFormat, "Some of input parameters must be a matrices" );
    }

    int numVal;
    int numFunc;
    double valError;
    double valNewError;

    numVal = X0->rows;
    numFunc = observRes->rows;

    /* test input data */
    if( X0->cols != 1 )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Number of colomn of vector X0 must be 1" );
    }

    if( observRes->cols != 1 )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Number of colomn of vector observed rusult must be 1" );
    }

    if( resultX->cols != 1 || resultX->rows != numVal )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Size of result vector X must be equals to X0" );
    }

    if( maxIter <= 0 )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Number of maximum iteration must be > 0" );
    }

    if( epsilon < 0 )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Epsilon must be >= 0" );
    }

    /* copy x0 to current value of x */
    CV_CALL( vectX = cvCreateMat(numVal, 1, CV_64F) );
    CV_CALL( vectNewX = cvCreateMat(numVal, 1, CV_64F) );
    CV_CALL( resFunc = cvCreateMat(numFunc,1, CV_64F) );
    CV_CALL( resNewFunc = cvCreateMat(numFunc,1, CV_64F) );
    CV_CALL( error = cvCreateMat(numFunc,1, CV_64F) );
    CV_CALL( errorNew = cvCreateMat(numFunc,1, CV_64F) );
    CV_CALL( Jac = cvCreateMat(numFunc,numVal, CV_64F) );
    CV_CALL( delta = cvCreateMat(numVal, 1, CV_64F) );
    CV_CALL( matrJtJ = cvCreateMat(numVal, numVal, CV_64F) );
    CV_CALL( matrJtJN = cvCreateMat(numVal, numVal, CV_64F) );
    CV_CALL( matrJt = cvCreateMat(numVal, numFunc,CV_64F) );
    CV_CALL( vectB = cvCreateMat(numVal, 1, CV_64F) );

    cvCopy(X0,vectX);

    /* ========== Main optimization loop ============ */
    double change;
    int currIter;
    double alpha;   /* LM damping factor */

    change = 1;
    currIter = 0;
    alpha = 0.001;

    do
    {
        /* Compute value of function */
        function(vectX,resFunc);
        /* Print result of function to file */

        /* Compute error */
        cvSub(observRes,resFunc,error);
        //valError = error_function(observRes,resFunc);
        /* Need to use new version of computing error (norm) */
        valError = cvNorm(observRes,resFunc);

        /* Compute Jacobian for given point vectX */
        JacobianFunction(vectX,Jac);

        /* Define optimal delta for J'*J*delta=J'*error */
        /* compute J'J */
        cvMulTransposed(Jac,matrJtJ,1);

        cvCopy(matrJtJ,matrJtJN);

        /* compute J'*error */
        cvTranspose(Jac,matrJt);
        cvmMul(matrJt,error,vectB);

        /* Solve normal equation for given alpha and Jacobian:
           retry with larger damping until the step reduces the error. */
        do
        {
            /* Increase diagonal elements by alpha */
            for( int i = 0; i < numVal; i++ )
            {
                double val;
                val = cvmGet(matrJtJ,i,i);
                cvmSet(matrJtJN,i,i,(1+alpha)*val);
            }

            /* Solve system to define delta */
            cvSolve(matrJtJN,vectB,delta,CV_SVD);

            /* We know delta and we can define new value of vector X */
            cvAdd(vectX,delta,vectNewX);

            /* Compute result of function for new vector X */
            function(vectNewX,resNewFunc);
            cvSub(observRes,resNewFunc,errorNew);
            valNewError = cvNorm(observRes,resNewFunc);

            currIter++;

            if( valNewError < valError )
            {/* accept new value: relax damping and move to the new X */
                valError = valNewError;

                /* Compute relative change of required parameter vectorX. change = norm(curr-prev) / norm(curr) ) */
                change = cvNorm(vectX, vectNewX, CV_RELATIVE_L2);

                alpha /= 10;
                cvCopy(vectNewX,vectX);
                break;
            }
            else
            {
                /* step rejected: stiffen the damping and retry */
                alpha *= 10;
            }

        } while ( currIter < maxIter );
        /* new value of X and alpha were accepted */

    } while ( change > epsilon && currIter < maxIter );

    /* result was computed */
    cvCopy(vectX,resultX);

    __END__;

    cvReleaseMat(&vectX);
    cvReleaseMat(&vectNewX);
    cvReleaseMat(&resFunc);
    cvReleaseMat(&resNewFunc);
    cvReleaseMat(&error);
    cvReleaseMat(&errorNew);
    cvReleaseMat(&Jac);
    cvReleaseMat(&delta);
    cvReleaseMat(&matrJtJ);
    cvReleaseMat(&matrJtJN);
    cvReleaseMat(&matrJt);
    cvReleaseMat(&vectB);

    return;
}
/*-------------------------------------------------------------------------------------*/ void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr) { /* Compute number of good points */ int num = cvCountNonZero(status); /* Create arrays */ CvMat *objPoints = 0; objPoints = cvCreateMat(4,num,CV_64F); CvMat *points2D = 0; points2D = cvCreateMat(2,num,CV_64F); int currVis = 0; int i; #if 1 FILE *file; file = fopen("d:\\test\\projStatus.txt","w"); #endif int totalNum = objPoints4D->cols; for( i = 0; i < totalNum; i++ ) { fprintf(file,"%d (%d) ",i,status->data.ptr[i]); if( status->data.ptr[i] ) { #if 1 double X,Y,Z,W; double x,y; X = cvmGet(objPoints4D,0,i); Y = cvmGet(objPoints4D,1,i); Z = cvmGet(objPoints4D,2,i); W = cvmGet(objPoints4D,3,i); x = cvmGet(points2,0,i); y = cvmGet(points2,1,i); fprintf(file,"%d (%lf %lf %lf %lf) - (%lf %lf)",i,X,Y,Z,W,x,y ); #endif cvmSet(objPoints,0,currVis,cvmGet(objPoints4D,0,i)); cvmSet(objPoints,1,currVis,cvmGet(objPoints4D,1,i)); cvmSet(objPoints,2,currVis,cvmGet(objPoints4D,2,i)); cvmSet(objPoints,3,currVis,cvmGet(objPoints4D,3,i)); cvmSet(points2D,0,currVis,cvmGet(points2,0,i)); cvmSet(points2D,1,currVis,cvmGet(points2,1,i)); currVis++; } fprintf(file,"\n"); } #if 1 fclose(file); #endif icvComputeProjectMatrix(objPoints,points2D,projMatr); /* Free allocated memory */ cvReleaseMat(&objPoints); cvReleaseMat(&points2D); }
void icvAddNewImageToPrevious____( IplImage *newImage,//Image to add IplImage *oldImage,//Previous image CvMat *oldPoints,// previous 2D points on prev image (some points may be not visible) CvMat *oldPntStatus,//Status for each point on prev image CvMat *objPoints4D,//prev 4D points CvMat *newPoints, //Points on new image corr for prev CvMat *newPntStatus,// New point status for new image CvMat *newFPoints2D1,//new feature points on prev image CvMat *newFPoints2D2,//new feature points on new image CvMat *newFPointsStatus, CvMat *newProjMatr, int useFilter, double threshold)//New projection matrix { CvMat *points2 = 0; CvMat *status = 0; CvMat *newFPointsStatusTmp = 0; //CV_FUNCNAME( "icvAddNewImageToPrevious____" ); __BEGIN__; /* First found correspondence points for images */ /* Test input params */ int numPoints; numPoints = oldPoints->cols; /* Allocate memory */ points2 = cvCreateMat(2,numPoints,CV_64F); status = cvCreateMat(1,numPoints,CV_8S); newFPointsStatusTmp = cvCreateMat(1, newFPoints2D1->cols,CV_8S); int corrNum; corrNum = icvFindCorrForGivenPoints( oldImage,/* Image 1 */ newImage,/* Image 2 */ oldPoints, oldPntStatus, points2, status, useFilter,/*Use fundamental matrix to filter points */ threshold);/* Threshold for good points in filter */ cvCopy(status,newPntStatus); cvCopy(points2,newPoints); CvMat projMatr; double projMatr_dat[12]; projMatr = cvMat(3,4,CV_64F,projMatr_dat); if( corrNum >= 6 ) {/* We can compute projection matrix */ // icvComputeProjectMatrix(objPoints4D,points2,&projMatr); icvComputeProjectMatrixStatus(objPoints4D,points2,status,&projMatr); cvCopy(&projMatr,newProjMatr); /* Create new points and find correspondence */ icvCreateFeaturePoints(newImage, newFPoints2D2,newFPointsStatus); /* Good if we test new points before find corr points */ /* Find correspondence for new found points */ icvFindCorrForGivenPoints( newImage,/* Image 1 */ oldImage,/* Image 2 */ newFPoints2D2, newFPointsStatus,//prev status newFPoints2D1, 
newFPointsStatusTmp,//new status useFilter,/*Use fundamental matrix to filter points */ threshold);/* Threshold for good points in filter */ /* We generated new points on image test for exist points */ /* Remove all new double points */ int origNum; /* Find point of old image */ origNum = icvRemoveDoublePoins( oldPoints,/* Points on prev image */ newFPoints2D1,/* New points */ oldPntStatus,/* Status for old points */ newFPointsStatusTmp, newFPointsStatusTmp,//orig status 20);/* Status for new points */ /* Find double points on new image */ origNum = icvRemoveDoublePoins( newPoints,/* Points on prev image */ newFPoints2D2,/* New points */ newPntStatus,/* Status for old points */ newFPointsStatusTmp, newFPointsStatusTmp,//orig status 20);/* Status for new points */ /* Add all new good points to result */ /* Copy new status to old */ cvCopy(newFPointsStatusTmp,newFPointsStatus); } __END__; /* Free allocated memory */ return; }
/*-------------------------------------------------------------------------------------*/
/* Grows the point/status arrays in place: reallocates *oldPoints (numCoord x N) and
 * *oldStatus (1 x N) to hold oldNum + addNum + addCreateNum columns, copies the old
 * values first, then (optionally) the addPoints/addStatus values, and leaves the
 * remaining addCreateNum columns zeroed.  The old matrices are released and the
 * pointers are redirected to the new ones.
 * Returns the new total number of columns (0 if an argument check fails). */
int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
{
    /* Add to existing points and status arrays new points or just grow */
    CvMat *newOldPoint  = 0;
    CvMat *newOldStatus = 0;
    int newTotalNumber = 0;

    CV_FUNCNAME( "icvGrowPointsAndStatus" );
    __BEGIN__;

    /* Test for errors */
    if( oldPoints == 0 || oldStatus == 0 )
    {
        CV_ERROR( CV_StsNullPtr, "Some of parameters is a NULL pointer" );
    }

    if( *oldPoints == 0 || *oldStatus == 0 )
    {
        CV_ERROR( CV_StsNullPtr, "Some of parameters is a NULL pointer" );
    }

    if( !CV_IS_MAT(*oldPoints))
    {
        CV_ERROR( CV_StsUnsupportedFormat, "oldPoints must be a pointer to a matrix" );
    }

    if( !CV_IS_MASK_ARR(*oldStatus))
    {
        CV_ERROR( CV_StsUnsupportedFormat, "oldStatus must be a pointer to a mask array" );
    }

    int oldNum;
    oldNum = (*oldPoints)->cols;
    if( oldNum < 1 )
    {
        CV_ERROR( CV_StsOutOfRange, "Number of old points must be > 0" );
    }

    /* Define if need number of add points */
    /* addPoints/addStatus are optional: addNum stays 0 unless both are valid.
       Note: invalid (non-mat) add arrays are silently ignored, not reported. */
    int addNum;
    addNum = 0;
    if( addPoints != 0 && addStatus != 0 )
    {/* We have aditional points */
        if( CV_IS_MAT(addPoints) && CV_IS_MASK_ARR(addStatus) )
        {
            addNum = addPoints->cols;
            if( addStatus->cols != addNum )
            {
                CV_ERROR( CV_StsOutOfRange, "Number of add points and statuses must be the same" );
            }
        }
    }

    /* Number of coordinates per point (rows of the point matrix) */
    int numCoord;
    numCoord = (*oldPoints)->rows;

    newTotalNumber = oldNum + addNum + addCreateNum;
    if( newTotalNumber )
    {
        /* Allocate the grown arrays (the old ones are released further below) */
        newOldPoint  = cvCreateMat(numCoord,newTotalNumber,CV_64F);
        newOldStatus = cvCreateMat(1,newTotalNumber,CV_8S);

        /* Copy old values to  */
        int i;

        /* Clear all values so the extra addCreateNum columns start zeroed */
        cvZero(newOldPoint);
        cvZero(newOldStatus);

        for( i = 0; i < oldNum; i++ )
        {
            int currCoord;
            for( currCoord = 0; currCoord < numCoord; currCoord++ )
            {
                cvmSet(newOldPoint,currCoord,i,cvmGet(*oldPoints,currCoord,i));
            }
            newOldStatus->data.ptr[i] = (*oldStatus)->data.ptr[i];
        }

        /* Copy additional points and statuses (appended after the old columns) */
        if( addNum )
        {
            for( i = 0; i < addNum; i++ )
            {
                int currCoord;
                for( currCoord = 0; currCoord < numCoord; currCoord++ )
                {
                    cvmSet(newOldPoint,currCoord,i+oldNum,cvmGet(addPoints,currCoord,i));
                }
                newOldStatus->data.ptr[i+oldNum] = addStatus->data.ptr[i];
                //cvmSet(newOldStatus,0,i,cvmGet(addStatus,0,i));
            }
        }

        /* Delete previous data */
        cvReleaseMat(oldPoints);
        cvReleaseMat(oldStatus);

        /* copy pointers */
        *oldPoints = newOldPoint;
        *oldStatus = newOldStatus;
    }

    __END__;

    return newTotalNumber;
}
/* Returns number of corresponding points */
/* Tracks the points of points1 that are marked visible in pntStatus1 from image1
 * into image2 using pyramidal Lucas-Kanade optical flow; writes the tracked
 * coordinates into points2 and the surviving-track mask into pntStatus2.
 * If useFilter is non-zero and more than 7 tracks survive, outliers are
 * additionally rejected with a RANSAC fundamental-matrix fit (threshold = RANSAC
 * reprojection threshold).  points1/points2 are 2xN, statuses are 1xN mask arrays. */
int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
                               IplImage *image2,/* Image 2 */
                               CvMat *points1,
                               CvMat *pntStatus1,
                               CvMat *points2,
                               CvMat *pntStatus2,
                               int useFilter,/*Use fundamental matrix to filter points */
                               double threshold)/* Threshold for good points in filter */
{
    int resNumCorrPoints = 0;
    CvPoint2D32f* cornerPoints1 = 0;
    CvPoint2D32f* cornerPoints2 = 0;
    char*  status = 0;
    float* errors = 0;
    CvMat* tmpPoints1 = 0;
    CvMat* tmpPoints2 = 0;
    CvMat* pStatus = 0;
    IplImage *grayImage1 = 0;
    IplImage *grayImage2 = 0;
    IplImage *pyrImage1 = 0;
    IplImage *pyrImage2 = 0;

    CV_FUNCNAME( "icvFindCorrForGivenPoints" );
    __BEGIN__;

    /* Test input data for errors */

    /* Test for null pointers */
    if( image1     == 0 || image2     == 0 ||
        points1    == 0 || points2    == 0 ||
        pntStatus1 == 0 || pntStatus2 == 0)
    {
        CV_ERROR( CV_StsNullPtr, "Some of parameters is a NULL pointer" );
    }

    /* Test image size */
    int w,h;
    w = image1->width;
    h = image1->height;

    if( w <= 0 || h <= 0)
    {
        CV_ERROR( CV_StsOutOfRange, "Size of image1 must be > 0" );
    }

    if( image2->width != w || image2->height != h )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Size of images must be the same" );
    }

    /* Test for matrices */
    if( !CV_IS_MAT(points1)    || !CV_IS_MAT(points2) ||
        !CV_IS_MAT(pntStatus1) || !CV_IS_MAT(pntStatus2) )
    {
        CV_ERROR( CV_StsUnsupportedFormat, "Input parameters (points and status) must be a matrices" );
    }

    /* Test type of status matrices */
    if( !CV_IS_MASK_ARR(pntStatus1) || !CV_IS_MASK_ARR(pntStatus2) )
    {
        CV_ERROR( CV_StsUnsupportedFormat, "Statuses must be a mask arrays" );
    }

    /* Test number of points */
    int numPoints;
    numPoints = points1->cols;

    if( numPoints <= 0 )
    {
        CV_ERROR( CV_StsOutOfRange, "Number of points1 must be > 0" );
    }

    if( points2->cols != numPoints || pntStatus1->cols != numPoints || pntStatus2->cols != numPoints )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Number of points and statuses must be the same" );
    }

    if( points1->rows != 2 || points2->rows != 2 )
    {
        CV_ERROR( CV_StsOutOfRange, "Number of points coordinates must be 2" );
    }

    if( pntStatus1->rows != 1 || pntStatus2->rows != 1 )
    {
        CV_ERROR( CV_StsOutOfRange, "Status must be a matrix 1xN" );
    }
    /* ----- End test ----- */

    /* Compute number of visible points on image1 */
    int numVisPoints;
    numVisPoints = cvCountNonZero(pntStatus1);

    if( numVisPoints > 0 )
    {
        /* Create temporary images */
        /* We must use iplImage againts hughgui images */
/*
        CvvImage grayImage1;
        CvvImage grayImage2;
        CvvImage pyrImage1;
        CvvImage pyrImage2;
*/
        /* Create Ipl images */
        CV_CALL( grayImage1 = cvCreateImage(cvSize(w,h),8,1) );
        CV_CALL( grayImage2 = cvCreateImage(cvSize(w,h),8,1) );
        CV_CALL( pyrImage1  = cvCreateImage(cvSize(w,h),8,1) );
        CV_CALL( pyrImage2  = cvCreateImage(cvSize(w,h),8,1) );

        /* Work buffers for the LK tracker, sized by the visible-point count only */
        CV_CALL( cornerPoints1 = (CvPoint2D32f*)cvAlloc( sizeof(CvPoint2D32f)*numVisPoints) );
        CV_CALL( cornerPoints2 = (CvPoint2D32f*)cvAlloc( sizeof(CvPoint2D32f)*numVisPoints) );
        CV_CALL( status = (char*)cvAlloc( sizeof(char)*numVisPoints) );
        CV_CALL( errors = (float*)cvAlloc( 2 * sizeof(float)*numVisPoints) );

        int i;
        for( i = 0; i < numVisPoints; i++ )
        {
            status[i] = 1;
        }

        /* !!! Need test creation errors */
/*
        if( !grayImage1.Create(w,h,8)) EXIT;
        if( !grayImage2.Create(w,h,8)) EXIT;
        if( !pyrImage1. Create(w,h,8)) EXIT;
        if( !pyrImage2. Create(w,h,8)) EXIT;
*/
        cvCvtColor(image1,grayImage1,CV_BGR2GRAY);
        cvCvtColor(image2,grayImage2,CV_BGR2GRAY);

/*
        grayImage1.CopyOf(image1,0);
        grayImage2.CopyOf(image2,0);
*/
        /* Copy points good points from input data */
        /* Compact the visible points into cornerPoints1 (dense, 0..numVisPoints-1) */
        uchar *stat1 = pntStatus1->data.ptr;
        uchar *stat2 = pntStatus2->data.ptr;

        int curr = 0;
        for( i = 0; i < numPoints; i++ )
        {
            if( stat1[i] )
            {
                cornerPoints1[curr].x = (float)cvmGet(points1,0,i);
                cornerPoints1[curr].y = (float)cvmGet(points1,1,i);
                curr++;
            }
        }

        /* Define number of levels of pyramid */
        cvCalcOpticalFlowPyrLK( grayImage1, grayImage2,
                                pyrImage1, pyrImage2,
                                cornerPoints1, cornerPoints2,
                                numVisPoints, cvSize(10,10), 3,
                                status, errors,
                                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03),
                                0/*CV_LKFLOW_PYR_A_READY*/ );

        memset(stat2,0,sizeof(uchar)*numPoints);

        int currVis = 0;
        int totalCorns = 0;

        /* Copy new points and set status */
        /* stat1 may not be the same as stat2 */
        /* Scatter the dense tracking results back to the original sparse indexing;
           a track is kept only if LK succeeded and its error is below 1000. */
        for( i = 0; i < numPoints; i++ )
        {
            if( stat1[i] )
            {
                if( status[currVis] && errors[currVis] < 1000 )
                {
                    stat2[i] = 1;
                    cvmSet(points2,0,i,cornerPoints2[currVis].x);
                    cvmSet(points2,1,i,cornerPoints2[currVis].y);
                    totalCorns++;
                }
                currVis++;
            }
        }

        resNumCorrPoints = totalCorns;

        /* Filter points using RANSAC */
        if( useFilter )
        {
            resNumCorrPoints = 0;
            /* Use RANSAC filter for found points */
            if( totalCorns > 7 )
            {
                /* Create array with good points only */
                CV_CALL( tmpPoints1 = cvCreateMat(2,totalCorns,CV_64F) );
                CV_CALL( tmpPoints2 = cvCreateMat(2,totalCorns,CV_64F) );

                /* Copy just good points */
                int currPoint = 0;
                for( i = 0; i < numPoints; i++ )
                {
                    if( stat2[i] )
                    {
                        cvmSet(tmpPoints1,0,currPoint,cvmGet(points1,0,i));
                        cvmSet(tmpPoints1,1,currPoint,cvmGet(points1,1,i));

                        cvmSet(tmpPoints2,0,currPoint,cvmGet(points2,0,i));
                        cvmSet(tmpPoints2,1,currPoint,cvmGet(points2,1,i));

                        currPoint++;
                    }
                }

                /* Compute fundamental matrix */
                CvMat fundMatr;
                double fundMatr_dat[9];
                fundMatr = cvMat(3,3,CV_64F,fundMatr_dat);

                CV_CALL( pStatus = cvCreateMat(1,totalCorns,CV_32F) );
                int num = cvFindFundamentalMat(tmpPoints1,tmpPoints2,&fundMatr,CV_FM_RANSAC,threshold,0.99,pStatus);
                if( num > 0 )
                {
                    int curr = 0;
                    /* Set final status for points2 */
                    for( i = 0; i < numPoints; i++ )
                    {
                        if( stat2[i] )
                        {
                            if( cvmGet(pStatus,0,curr) == 0 )
                            {
                                stat2[i] = 0;
                            }
                            curr++;
                        }
                    }

                    /* NOTE(review): curr ends up equal to totalCorns (all tested
                       points), not the number of RANSAC inliers — the return value
                       therefore includes rejected points when filtering is on.
                       Looks unintended; confirm against callers before changing. */
                    resNumCorrPoints = curr;
                }
            }
        }
    }

    __END__;

    /* Free allocated memory (all release/free calls are NULL-safe) */
    cvFree(&cornerPoints1);
    cvFree(&cornerPoints2);
    cvFree(&status);
    cvFree(&errors);
    cvFree(&tmpPoints1);
    cvFree(&tmpPoints2);
    cvReleaseMat( &pStatus );
    cvReleaseImage( &grayImage1 );
    cvReleaseImage( &grayImage2 );
    cvReleaseImage( &pyrImage1 );
    cvReleaseImage( &pyrImage2 );

    return resNumCorrPoints;
}
void imageCallback(const sensor_msgs::ImageConstPtr& msg) { //bridge that will transform the message (image) from ROS code back to "image" code sensor_msgs::CvBridge bridge; fprintf(stderr, "\n call Back funtion \n"); //publish data (obstacle waypoints) back to the boat ros::NodeHandle n; std_msgs::Float32 xWaypoint_msg; // X coordinate obstacle message object std_msgs::Float32 zWaypoint_msg; // Y coordinate obstacle message object //std::stringstream ss; //publish the waypoint data ros::Publisher Xwaypoint_info_pub = n.advertise<std_msgs::Float32>("Xwaypoint_info", 0.01); ros::Publisher Zwaypoint_info_pub = n.advertise<std_msgs::Float32>("Zwaypoint_info", 0.01); /***********************************************************************/ //live image coming streamed straight from the boat's camera IplImage* boatFront = bridge.imgMsgToCv(msg, "bgr8"); //The boat takes flipped images, so you need to flip them back to normal cvFlip(boatFront, boatFront, 0); IplImage* backUpImage = cvCloneImage(boatFront); boatFront->origin = IPL_ORIGIN_TL; //sets image origin to top left corner int X = boatFront->height; int Y = boatFront->width; //cout << "height " << X << endl; //cout << "width " << Y << endl; /*********************Image Filtering variables****************************/ //these images are used for segmenting objects from the overall background //create a one channel image to convert from RGB to GRAY IplImage* grayImage = cvCreateImage(cvGetSize(boatFront),IPL_DEPTH_8U,1); //convert grayImage to binary (final step after converting from GRAY) IplImage* bwImage = cvCreateImage(cvGetSize(grayImage),IPL_DEPTH_8U,1); //variables used for the flood fill segmentation CvPoint seed_point = cvPoint(boatFront->height/2 + 70,0); //not sure how this variable works CvScalar color = CV_RGB(250,0,0); CvMemStorage* grayStorage = NULL; //memory storage for contour sequence CvSeq* contours = 0; // get blobs and filter them using their area //IplConvKernel* morphKernel = 
cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL); //IplImage* original, *originalThr; //IplImage* segmentated = cvCreateImage(cvGetSize(boatFront), 8, 1); //unsigned int blobNumber = 0; //IplImage* labelImg = cvCreateImage(cvGetSize(boatFront), IPL_DEPTH_LABEL, 1); CvMoments moment; /***********************************************************************/ //boat's edge distance from the camera. This is used for visual calibration //to know the distance from the boat to the nearest obstacles. //With respect to the mounted camera, distance is 21 inches (0.5334 m) side to side //and 15 inches (0.381 m). //float boatFrontDistance = 0.381; //distance in meters //float boatSideDistance = 0.5334; //distance in meters // These variables tell the distance from the center bottom of the image // (the camera) to the square surrounding a the obstacle float obstacleDistance = 0.0; //Euclidean distance to object float obstacleHeading = 0.0; //distance variables from the camera calibration matrix int xPixel = 0; //pixels from left to right int yPixel = 0; //pixels from bottom to top float zObstacleDistance = 0; //object distance from the camera float xObstacleDistance = 0; float yObstacleDistance = 0.1143; //distance in meters from water to camera. //its gonna be constant assuming boat barely moves up and down in the water int pixelsNumber = 50; //number of pixels for an n x n matrix and # of neighbors const int arraySize = pixelsNumber; const int threeArraySize = pixelsNumber; //if n gets changed, then the algorithm might have to be //recalibrated. 
Try to keep it constant //these variables are used for the k nearest neighbors //int accuracy; //reponses for each of the classifications float responseWaterH, responseWaterS, responseWaterV; float responseGroundH, responseGroundS, responseGroundV; float responseSkyH, responseSkyS, responseSkyV; float averageHue = 0.0; float averageSat = 0.0; float averageVal = 0.0; CvMat* trainClasses = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample ); //used with the classifier CvMat* trainClassesH = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); CvMat* trainClassesS = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); CvMat* trainClassesV = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); //CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 ); //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample ); //used with the classifier /*CvMat* nearestWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* nearestSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1); //Distance CvMat* distanceWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1); CvMat* distanceSkyS = cvCreateMat(1, pixelsNumber, 
CV_32FC1); CvMat* distanceSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1); */ //these variables are use to traverse the picture by blocks of n x n pixels at //a time. //Index(0,0) does not exist, so make sure kj and ki start from 1 (in the //right way, of course) //x and y are the dimensions of the local patch of pixels int x = (boatFront->height)/2 + 70;//(boatFront->height)/2.5 + 105; int y = 0; int skyX = 0; int skyY = 0; int row1 = 0; int column1 = 0; //these two variables are used in order to divide the grid in the //resample segmentation part int xDivisor = 200; int yDivisor = 200; //ground sample //CvMat* groundTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1); //CvMat* groundTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1); //CvMat* groundTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1); //water sample CvMat* waterTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* waterTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* waterTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1); //n x n sample patch taken from the picture CvMat* sampleHue = cvCreateMat(1,arraySize,CV_32FC1); CvMat* sampleSat = cvCreateMat(1,arraySize,CV_32FC1); CvMat* sampleVal = cvCreateMat(1,arraySize,CV_32FC1); CvMat* resampleHue0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* resampleSat0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* resampleVal0 = cvCreateMat(threeArraySize,arraySize,CV_32FC1); CvMat* resampleHue = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1); CvMat* resampleSat = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1); CvMat* resampleVal = cvCreateMat(boatFront->height/xDivisor,boatFront->width/yDivisor,CV_32FC1); int xDiv = 20; int yDiv = 20; CvMat* resampleHue2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1); CvMat* resampleSat2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1); 
CvMat* resampleVal2 = cvCreateMat(boatFront->height/xDiv,boatFront->width/yDiv,CV_32FC1); //sky training sample CvMat* skyTrainingHue = cvCreateMat(arraySize,arraySize,CV_32FC1); CvMat* skyTrainingSat = cvCreateMat(arraySize,arraySize,CV_32FC1); CvMat* skyTrainingVal = cvCreateMat(arraySize,arraySize,CV_32FC1); //initialize each matrix element to zero for ease of use //cvZero(groundTrainingHue); //cvZero(groundTrainingSat); //cvZero(groundTrainingVal); cvZero(waterTrainingHue); cvZero(waterTrainingSat); cvZero(waterTrainingVal); cvZero(sampleHue); cvZero(sampleSat); cvZero(sampleVal); cvZero(resampleHue0); cvZero(resampleSat0); cvZero(resampleVal0); cvZero(resampleHue); cvZero(resampleSat); cvZero(resampleVal); cvZero(skyTrainingHue); cvZero(skyTrainingSat); cvZero(skyTrainingVal); //Stores the votes for each channel (whether it belongs to water or not //1 is part of water, 0 not part of water //if sum of votes is bigger than 1/2 the number of elements, then it belongs to water int votesSum = 0; int comparator[3]; //used when only three votes are needed //int comparatorTwo [3][3]; //used when six votes are needed //initial sum of votes is zero //Error if initialize both matrices inside a single for loop. Dont know why for(int i = 0; i < 3; i++) { comparator[i] = 0; } /***********************************************************************/ //Convert from RGB to HSV to control the brightness of the objects. //work with reflexion /*Sky recognition. Might be useful for detecting reflexion on the water. If the sky is detected, and the reflection has the same characteristics of something below the horizon, that "something" might be water. 
Assume sky wont go below the horizon */ //convert from RGB to HSV cvCvtColor(boatFront, boatFront, CV_BGR2HSV); cvCvtColor(backUpImage, backUpImage, CV_BGR2HSV); HsvImage I(boatFront); HsvImage IBackUp(backUpImage); //Sky detection /* for (int i=0; i<boatFront->height;i++) { for (int j=0; j<boatFront->width;j++) { //if something is bright enough, consider it sky and store the //value. HSV values go from 0 to 180 ... RGB goes from 0 to 255 if (((I[i][j].v >= 180) && (I[i][j].s <= 16))) // && ((I[i][j].h >=10)))) //&& (I[i][j].h <= 144)))) { //The HSV values vary between 0 and 1 cvmSet(skyTrainingHue,skyX,skyY,I[i][j].h); cvmSet(skyTrainingSat,skyX,skyY,I[i][j].s); cvmSet(skyTrainingVal,skyX,skyY,I[i][j].v); //I[i][j].h = 0.3*180; //H (color) //I[i][j].s = 0.3*180; //S (color intensity) //I[i][j].v = 0.6*180; //V (brightness) if (skyY == pixelsNumber-1) { if (skyX == pixelsNumber-1) skyX = 1; else skyX = skyX + 1; skyY = 1; } else skyY = skyY + 1; } } } /***********************************************************************/ //offline input pictures. 
Samples of water properties are taken from these //pictures to get a range of values for H, S, V that will be stored into a //pre-defined classifier IplImage* imageSample1 = cvLoadImage("20110805_032255.jpg"); cvSetImageROI(imageSample1, cvRect(0,0,imageSample1->height/0.5,imageSample1->width/1.83)); cvCvtColor(imageSample1, imageSample1, CV_BGR2HSV); HsvImage I1(imageSample1); IplImage* imageSample2 = cvLoadImage("20110805_032257.jpg"); cvCvtColor(imageSample2, imageSample2, CV_BGR2HSV); HsvImage I2(imageSample2); IplImage* imageSample3 = cvLoadImage("20110805_032259.jpg"); cvCvtColor(imageSample3, imageSample3, CV_BGR2HSV); HsvImage I3(imageSample3); IplImage* imageSample4 = cvLoadImage("20110805_032301.jpg"); cvCvtColor(imageSample4, imageSample4, CV_BGR2HSV); HsvImage I4(imageSample4); IplImage* imageSample5 = cvLoadImage("20110805_032303.jpg"); cvCvtColor(imageSample5, imageSample5, CV_BGR2HSV); HsvImage I5(imageSample5); IplImage* imageSample6 = cvLoadImage("20110805_032953.jpg"); cvCvtColor(imageSample6, imageSample6, CV_BGR2HSV); HsvImage I6(imageSample6); IplImage* imageSample7 = cvLoadImage("20110805_032955.jpg"); cvCvtColor(imageSample7, imageSample7, CV_BGR2HSV); HsvImage I7(imageSample7); IplImage* imageSample8 = cvLoadImage("20110805_032957.jpg"); cvCvtColor(imageSample8, imageSample8, CV_BGR2HSV); HsvImage I8(imageSample8); IplImage* imageSample9 = cvLoadImage("20110805_032959.jpg"); cvCvtColor(imageSample9, imageSample9, CV_BGR2HSV); HsvImage I9(imageSample9); IplImage* imageSample10 = cvLoadImage("20110805_033001.jpg"); cvCvtColor(imageSample10, imageSample10, CV_BGR2HSV); HsvImage I10(imageSample10); IplImage* imageSample11 = cvLoadImage("20110805_033009.jpg"); cvCvtColor(imageSample11, imageSample11, CV_BGR2HSV); HsvImage I11(imageSample11); IplImage* imageSample12 = cvLoadImage("20110805_033011.jpg"); cvCvtColor(imageSample12, imageSample12, CV_BGR2HSV); HsvImage I12(imageSample12); //IplImage* imageSample13 = cvLoadImage("20110812_110924.jpg"); 
//cvCvtColor(imageSample13, imageSample13, CV_BGR2HSV); //HsvImage I13(imageSample13); for (int i=0; i < threeArraySize; i++) { for (int j=0; j < arraySize; j++) { row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8); column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615); averageHue = (I1[row1][column1].h + I2[row1][column1].h + I3[row1][column1].h + I4[row1][column1].h + I5[row1][column1].h + I6[row1][column1].h + I7[row1][column1].h + I8[row1][column1].h + I9[row1][column1].h + I10[row1][column1].h + I11[row1][column1].h + I12[row1][column1].h) / 12; averageSat = (I1[row1][column1].s + I2[row1][column1].s + I3[row1][column1].s + I4[row1][column1].s + I5[row1][column1].s + I6[row1][column1].s + I7[row1][column1].s + I8[row1][column1].s + I9[row1][column1].s + I10[row1][column1].s + I11[row1][column1].s + I12[row1][column1].s) / 12; averageVal = (I1[row1][column1].v + I2[row1][column1].v + I3[row1][column1].v + I4[row1][column1].v + I5[row1][column1].v + I6[row1][column1].v + I7[row1][column1].v + I8[row1][column1].v + I9[row1][column1].v + I10[row1][column1].v + I11[row1][column1].v + I12[row1][column1].v) / 12; //water patch sample (n X n matrix) cvmSet(waterTrainingHue,i,j,averageHue); cvmSet(waterTrainingSat,i,j,averageSat); cvmSet(waterTrainingVal,i,j,averageVal); //patch is red (this is for me to know where the ground patch sample is) //I[row1][column1].h = 0; //I[row1][column1].s = 255; //I[row1][column1].v = 255; } } //creating a training sample from the an image taken on the fly row1 = 0; column1 = 0; for (int i=0; i<pixelsNumber; i++) { for (int j=0; j<pixelsNumber; j++) { row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8); column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615); cvmSet(trainClassesH,i,0,I[row1][column1].h); cvmSet(trainClassesS,i,0,I[row1][column1].s); cvmSet(trainClassesV,i,0,I[row1][column1].v); } } //order the water samples in ascending order on order to know a range 
cvSort(waterTrainingHue, waterTrainingHue, CV_SORT_ASCENDING); cvSort(waterTrainingSat, waterTrainingSat, CV_SORT_ASCENDING); cvSort(waterTrainingVal, waterTrainingVal, CV_SORT_ASCENDING); // find the maximum and minimum values in the array to create a range int maxH = cvmGet(waterTrainingHue,0,0); int maxS = cvmGet(waterTrainingSat,0,0); int maxV = cvmGet(waterTrainingVal,0,0); int minH = cvmGet(waterTrainingHue,0,0); int minS = cvmGet(waterTrainingSat,0,0); int minV = cvmGet(waterTrainingVal,0,0); for (int i=0; i < threeArraySize; i++) { for (int j=0; j < arraySize; j++) { if (cvmGet(waterTrainingHue,i,j) > maxH) maxH = cvmGet(waterTrainingHue,i,j); if (cvmGet(waterTrainingSat,i,j) > maxS) maxS = cvmGet(waterTrainingSat,i,j); if (cvmGet(waterTrainingVal,i,j) > maxV) maxV = cvmGet(waterTrainingVal,i,j); if (cvmGet(waterTrainingHue,i,j) < minH) minH = cvmGet(waterTrainingHue,i,j); if (cvmGet(waterTrainingSat,i,j) < minS) minS = cvmGet(waterTrainingSat,i,j); if (cvmGet(waterTrainingVal,i,j) < minV) minV = cvmGet(waterTrainingVal,i,j); } } /*********** Main loop. It traverses through the picture**********/ /******************** Live water samples *******************************/ //learn how "current water" looks like on the fly row1 = 0; column1 = 0; for (int i=0; i<pixelsNumber; i++) { for (int j=0; j<pixelsNumber; j++) { //front of boat might appear in the image. 
Account for that row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455) + ceil(X/4.8) - 55; column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615); cvmSet(resampleHue0,i,j,I[row1][column1].h); cvmSet(resampleSat0,i,j,I[row1][column1].s); cvmSet(resampleVal0,i,j,I[row1][column1].v); //visualize "resample" patch // I[row1][column1].h = 0; //I[row1][column1].s = 0; //I[row1][column1].v = 0; } } //order the water samples in ascending order on order to know a range cvSort(resampleHue0, resampleHue0, CV_SORT_ASCENDING); cvSort(resampleSat0, resampleSat0, CV_SORT_ASCENDING); cvSort(resampleVal0, resampleVal0, CV_SORT_ASCENDING); // find the maximum and minimum values in the array to create a range int maxH0 = cvmGet(resampleHue0,0,0); int maxS0 = cvmGet(resampleSat0,0,0); int maxV0 = cvmGet(resampleVal0,0,0); int minH0 = cvmGet(resampleHue0,0,0); int minS0 = cvmGet(resampleSat0,0,0); int minV0 = cvmGet(resampleVal0,0,0); for (int i=0; i < threeArraySize; i++) { for (int j=0; j < arraySize; j++) { if (cvmGet(resampleHue0,i,j) > maxH0) maxH0 = cvmGet(resampleHue0,i,j); if (cvmGet(resampleSat0,i,j) > maxS0) maxS0 = cvmGet(resampleSat0,i,j); if (cvmGet(resampleVal0,i,j) > maxV0) maxV0 = cvmGet(resampleVal0,i,j); if (cvmGet(resampleHue0,i,j) < minH0) minH0 = cvmGet(resampleHue0,i,j); if (cvmGet(resampleSat0,i,j) < minS0) minS0 = cvmGet(resampleSat0,i,j); if (cvmGet(resampleVal0,i,j) < minV0) minV0 = cvmGet(resampleVal0,i,j); } } for(int i = 0; i < 3; i++) { comparator[i] = 0; } //int counter = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; while (x < X-1) { //get a random sample taken from the picture. 
Must be determined whether //it is water or ground for (int i = 0; i<6;i++) { column1 = y+i; if (column1 > Y-1) column1 = Y-1; cvmSet(sampleHue,0,i,I[x][column1].h); cvmSet(sampleSat,0,i,I[x][column1].s); cvmSet(sampleVal,0,i,I[x][column1].v); } for (int i=0;i<6;i++) { for (int j=0;j<6;j++) { if ((minH0 < cvmGet(sampleHue,0,j)) && (maxH0 > cvmGet(sampleHue,0,j))) //mark water samples as green comparator[0] = 1; else comparator[0] = 0; if ((minS0 < cvmGet(sampleSat,0,j)) && (maxS0 > cvmGet(sampleSat,0,j))) //mark water samples as green comparator[1] = 1; else comparator[1] = 0; if ((minV0 < cvmGet(sampleVal,0,j)) && (maxV0 > cvmGet(sampleVal,0,j))) //mark water samples as red comparator[2] = 1; else comparator[2] = 0; //count votes for (int i3=0; i3 < 3; i3++) votesSum = votesSum + comparator[i3]; if (votesSum > 1) { //use the known water samples as new training data //if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor)) //{ // cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j)); // cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j)); // cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j)); //} //6 use to be equal to pixelsNumber. I[x][y-6+j].h = 0; I[x][y-6+j].s = 255; I[x][y-6+j].v = 255; } votesSum = 0; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } //ix = 0; } /********************************************************************* // Use nearest neighbors to increase accuracy skyX = 0; skyY = 0; while (x < X-1) { //get a random sample taken from the picture. 
Must be determined whether //it is water or ground for (int i = 0; i<6;i++) { column1 = y+i; if (column1 > Y-1) column1 = Y-1; cvmSet(sampleHue,0,i,I[x][column1].h); cvmSet(sampleSat,0,i,I[x][column1].s); cvmSet(sampleVal,0,i,I[x][column1].v); } //Find the shortest distance between a pixel and the neighbors from each of //the training samples (sort of inefficient, but might do the job...sometimes) //HSV for water sample // learn classifier //CvKNearest knn(trainData, trainClasses, 0, false, itemsNumber); CvKNearest knnWaterHue(waterTrainingHue, trainClassesH, 0, false, pixelsNumber); CvKNearest knnWaterSat(waterTrainingSat, trainClassesS, 0, false, pixelsNumber); CvKNearest knnWaterVal(waterTrainingVal, trainClassesV, 0, false, pixelsNumber); //HSV for ground sample //CvKNearest knnGroundHue(groundTrainingHue, trainClasses2, 0, false, pixelsNumber); //CvKNearest knnGroundSat(groundTrainingSat, trainClasses2, 0, false, pixelsNumber); //CvKNearest knnGroundVal(groundTrainingVal, trainClasses2, 0, false, pixelsNumber); //HSV for sky sample //if (cvmGet(skyTrainingHue,0,0)!=0.0 && cvmGet(skyTrainingSat,0,0)!=0.0 && cvmGet(skyTrainingVal,0,0)!=0.0) //{ // CvKNearest knnSkyHue(skyTrainingHue, trainClasses, 0, false, pixelsNumber); // CvKNearest knnSkySat(skyTrainingSat, trainClasses, 0, false, pixelsNumber); // CvKNearest knnSkyVal(skyTrainingVal, trainClasses, 0, false, pixelsNumber); //} //scan nearest neighbors to each pixel responseWaterH = knnWaterHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestWaterH,0); responseWaterS = knnWaterSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestWaterS,0); responseWaterV = knnWaterVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestWaterV,0); //responseGroundH = knnGroundHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestGroundH,0); //responseGroundS = knnGroundSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestGroundS,0); //responseGroundV = knnGroundVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestGroundV,0); //for (int 
i=0;i<pixelsNumber;i++) //{ for (int j=0;j<pixelsNumber;j++) { if ((nearestWaterH->data.fl[j] == responseWaterH) )//&& (nearestWaterH->data.fl[j] == responseWaterH + 5)) // mark water samples as green comparator[0] = 1; else comparator[0] = 0; if ((nearestWaterS->data.fl[j] == responseWaterS) )//&& (nearestWaterS->data.fl[j] < responseWaterS + 5)) //mark water samples as green comparator[1] = 1; else comparator[1] = 0; if ((nearestWaterV->data.fl[j] == responseWaterV) )//&& (nearestWaterV->data.fl[j] < responseWaterV + 5)) //mark water samples as green comparator[2] = 1; else comparator[2] = 0; // similar sky pixels on the water //count votes for (int i3=0; i3 < 3; i3++) votesSum = votesSum + comparator[i3]; if (votesSum > 1) { I[x][y-6+j].h = 0; I[x][y-6+j].s = 255; I[x][y-6+j].v = 255; } votesSum = 0; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } // ix = 0; } /*********************************************************************/ for(int i = 0; i < 3; i++) { comparator[i] = 0; } //int counter = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; while (x < X-1) { //get a random sample taken from the picture. 
Must be determined whether //it is water or ground for (int i = 0; i<6;i++) { column1 = y+i; if (column1 > Y-1) column1 = Y-1; cvmSet(sampleHue,0,i,I[x][column1].h); cvmSet(sampleSat,0,i,I[x][column1].s); cvmSet(sampleVal,0,i,I[x][column1].v); } for (int i=0;i<6;i++) { for (int j=0;j<6;j++) { if ((minH < cvmGet(sampleHue,0,j)) && (maxH > cvmGet(sampleHue,0,j))) //mark water samples as green comparator[0] = 1; else comparator[0] = 0; if ((minS < cvmGet(sampleSat,0,j)) && (maxS > cvmGet(sampleSat,0,j))) //mark water samples as green comparator[1] = 1; else comparator[1] = 0; if ((minV < cvmGet(sampleVal,0,j)) && (maxV > cvmGet(sampleVal,0,j))) //mark water samples as red comparator[2] = 1; else comparator[2] = 0; //count votes for (int i3=0; i3 < 3; i3++) votesSum = votesSum + comparator[i3]; if (votesSum > 1) { //use the known water samples as new training data if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor)) { cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j)); cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j)); cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j)); } //6 use to be equal to pixelsNumber. I[x][y-6+j].h = 0; I[x][y-6+j].s = 255; I[x][y-6+j].v = 255; } votesSum = 0; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } //ix = 0; } /***************Deal with reflection*****************/ for(int i = 0; i < 3; i++) { comparator[i] = 0; } //int counter = 0; votesSum = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; while (x < X-1) { //get a random sample taken from the picture. 
Must be determined whether //it is water or ground for (int i = 0; i<6;i++) { column1 = y+i; if (column1 > Y-1) column1 = Y-1; cvmSet(sampleHue,0,i,I[x][column1].h); cvmSet(sampleSat,0,i,I[x][column1].s); cvmSet(sampleVal,0,i,I[x][column1].v); } for (int i=0;i<6;i++) { for (int j=0;j<6;j++) { if ((minH < cvmGet(sampleHue,0,j)) && (maxH > cvmGet(sampleHue,0,j))) //mark water samples as green comparator[0] = 1; else comparator[0] = 0; if ((0.8*255 > cvmGet(sampleSat,0,j)))// && (maxS < cvmGet(sampleSat,0,j))) //mark water samples as green comparator[1] = 1; else comparator[1] = 0; if ((0.6*255 < cvmGet(sampleVal,0,j)))// || (maxV < cvmGet(sampleVal,0,j))) //mark water samples as green comparator[2] = 1; else comparator[2] = 0; //count votes for (int i3=0; i3 < 3; i3++) votesSum = votesSum + comparator[i3]; if (votesSum > 1) { //use the known water samples as new training data if((i<boatFront->height/xDivisor) && (j<boatFront->width/yDivisor)) { cvmSet(resampleHue,i,j,cvmGet(sampleHue,0,j)); cvmSet(resampleSat,i,j,cvmGet(sampleSat,0,j)); cvmSet(resampleVal,i,j,cvmGet(sampleVal,0,j)); } //6 use to be equal to pixelsNumber. I[x][y-6+j].h = 0; I[x][y-6+j].s = 255; I[x][y-6+j].v = 255; } votesSum = 0; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. 
y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } //ix = 0; } /**********Resample the entire patch**********/ /*********find a new min and max for a new sample range*************/ for(int i = 0; i < 3; i++) { comparator[i] = 0; } //int counter = 0; votesSum = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; maxH = cvmGet(resampleHue,0,0); maxS = cvmGet(resampleSat,0,0); maxV = cvmGet(resampleVal,0,0); minH = cvmGet(resampleHue,0,0); minS = cvmGet(resampleSat,0,0); minV = cvmGet(resampleVal,0,0); for (int i=0; i < boatFront->height/xDivisor; i++) { for (int j=0; j < boatFront->width/yDivisor; j++) { if (cvmGet(resampleHue,i,j) > maxH) maxH = cvmGet(resampleHue,i,j); if (cvmGet(resampleSat,i,j) > maxS) maxS = cvmGet(resampleSat,i,j); if (cvmGet(resampleVal,i,j) > maxV) maxV = cvmGet(resampleVal,i,j); if (cvmGet(resampleHue,i,j) < minH) minH = cvmGet(resampleHue,i,j); if (cvmGet(resampleSat,i,j) < minS) minS = cvmGet(resampleSat,i,j); if (cvmGet(resampleVal,i,j) < minV) minV = cvmGet(resampleVal,i,j); } } while (x < X-1) { for (int i=0;i<6;i++) { for (int j=0;j<6;j++) { if ((minH < I[x][y-6+j].h) && (maxH > I[x][y-6+j].h)) //mark water samples as red I[x][y-6+j].h = 0; else comparator[0] = 0; if ((minS < I[x][y-6+j].s) && (maxS > I[x][y-6+j].s)) //mark water samples as red I[x][y-6+j].s = 255; else comparator[1] = 0; if ((minV < I[x][y-6+j].v) && (maxV > I[x][y-6+j].v)) //mark water samples as red I[x][y-6+j].v = 255; } } if (y < Y-1) //5 use to be equal to pixelsNumber-1. 
y = y + 5; if (y > Y-1) y = Y-1; else if (y == Y-1) { //5 use to be equal to pixelsNumber-1 x = x + 1; y = 0; } } //cout << "Sample data from current images" << endl; //for (int i = 0; i<20;i++) //{ // cout << "HUE: " << cvmGet(sampleHue,0,i) << endl; // cout << "Saturation: " << cvmGet(sampleSat,0,i) << endl; // cout << "Value: " << cvmGet(sampleVal,0,i) << endl; //} //traverse through the image one more time, divide the image in grids of // 500x500 pixels, and see how many pixels of water are in each grid. If // most of the pixels are labeled water, then mark all the other pixels // as water as well //int counter = 0; votesSum = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 70; y = 0; /***************Divide the picture in cells for filtering**********/ while (x < X-1) { //get a random sample taken from the picture. Must be determined whether //it is water or ground for (int i = 0; i < boatFront->height/xDivisor; i++) { for(int j = 0; j < boatFront->width/yDivisor; j++) { cvmSet(resampleHue2,i,j,I[x+i][y+j].h); cvmSet(resampleSat2,i,j,I[x+i][y+j].s); cvmSet(resampleVal2,i,j,I[x+i][y+j].v); if(cvmGet(resampleHue2,i,j)==0 && cvmGet(resampleSat2,i,j)==255 && cvmGet(resampleVal2,i,j)==255) { votesSum++; } } } if (votesSum > (((boatFront->height/xDivisor)*(boatFront->width/yDivisor))*(4/5))) { // if bigger than 4/5 the total number of pixels in a square, then consider the entire thing as water // We might need to use other smaller quantities (like 5/6 maybe?) 
for (int i = 0; i < boatFront->height/xDivisor;i++) { for (int j = 0; j < boatFront->width/yDivisor; j++) { row1 = x + i; if (row1 > X-1) row1 = X-1; column1 = y+j; if (column1 > Y-1) column1 = Y-1; I[row1][column1].h = 0; I[row1][column1].s = 255; I[row1][column1].v = 255; } } } else { // If not water, eliminate all red pixels and turn those pixels // back to the original color for (int i = 0; i < boatFront->height/xDivisor;i++) { for (int j = 0; j < boatFront->width/yDivisor; j++) { row1 = x + i; if (row1 > X-1) row1 = X-1; column1 = y+j; if (column1 > Y-1) column1 = Y-1; I[row1][column1].h = IBackUp[row1][column1].h;//255;//IBackUp[row1][column1].h; I[row1][column1].s = IBackUp[row1][column1].s;//255;//IBackUp[row1][column1].s; I[row1][column1].v = IBackUp[row1][column1].v;//255;//IBackUp[row1][column1].v; } } } y = y + boatFront->width/xDivisor; if (y > Y-1) { x = x + boatFront->height/yDivisor; y = 0; } votesSum = 0; } /********************Isolate obstacles************************/ votesSum = 0; int paint = 0; column1 = 0; row1 = 0; x = boatFront->height/2 + 40; y = 0; xDiv = 40; yDiv = 40; /***************Divide the picture in cells for filtering**********/ // Small pixel areas (noise) are going to be eliminated from the picture // living only the big obstacles while (x < X-2) { //get a random sample taken from the picture. 
Must be determined whether //it is water or ground for (int i = 0; i < boatFront->height/xDiv; i++) { for(int j = 0; j < boatFront->width/yDiv; j++) { row1 = x + i; if (row1 > X-2) row1 = X-2; column1 = y+j; if (column1 > Y-1) column1 = Y-1; cvmSet(resampleHue2,i,j,I[row1][column1].h); cvmSet(resampleSat2,i,j,I[row1][column1].s); cvmSet(resampleVal2,i,j,I[row1][column1].v); if(cvmGet(resampleHue2,i,j)==0 && cvmGet(resampleSat2,i,j)==255 && cvmGet(resampleVal2,i,j)==255) { votesSum++; } } } if (votesSum > (((boatFront->height/xDiv)*(boatFront->width/yDiv))*(4/5))) { // if bigger than 4/5 the total number of pixels in a square, then consider the entire thing as water // We might need to use other smaller quantities (like 5/6 maybe?) for (int i = 0; i < boatFront->height/xDiv;i++) { for (int j = 0; j < boatFront->width/yDiv; j++) { row1 = x + i; if (row1 > X-2) row1 = X-2; column1 = y+j; if (column1 > Y-1) column1 = Y-1; I[row1][column1].h = 0; I[row1][column1].s = 255; I[row1][column1].v = 255; } } } else { int count = 0; // If not water, eliminate all red pixels and turn those pixels // back to the original color for (int i = 0; i < boatFront->height/xDiv;i++) { for (int j = 0; j < boatFront->width/yDiv; j++) { row1 = x + i; if (row1 > X-2) row1 = X-2; column1 = y+j; if (column1 > Y-1) column1 = Y-1; I[row1][column1].h = IBackUp[row1][column1].h;//255; I[row1][column1].s = IBackUp[row1][column1].s;//255; I[row1][column1].v = IBackUp[row1][column1].v;//255; // count++; } } } y = y + boatFront->width/yDiv; if (y > Y-1) { x = x + boatFront->height/xDiv; if (x > X-2) x = X-2; y = 0; } votesSum = 0; } /****************Find Obstacles boundaries*********************************/ if( grayStorage == NULL ) { grayStorage = cvCreateMemStorage(0); } else { cvClearMemStorage(grayStorage); } //backUpImage = cvCloneImage(boatFront); //IBackUp(backUpImage); //Ignore unused parts of the image and convert them to black for (int i=0; i<backUpImage->height;i++) { for (int j=0; 
j<backUpImage->width;j++) { if(i < backUpImage->height/2 + 70) { IBackUp[i][j].h = 0; IBackUp[i][j].s = 0; IBackUp[i][j].v = 0; } else { IBackUp[i][j].h = I[i][j].h; IBackUp[i][j].s = I[i][j].s; IBackUp[i][j].v = I[i][j].v; } } } //convert from HSV to RGB cvCvtColor(boatFront, boatFront, CV_HSV2BGR); cvCvtColor(backUpImage, backUpImage, CV_HSV2BGR); //do flood fill for obstacles cvFloodFill( backUpImage, seed_point, color, cvScalarAll(255), cvScalarAll(2), NULL, 8, NULL); //convert to to gray to do more obstacle segmentation cvCvtColor(backUpImage, grayImage, CV_BGR2GRAY); //convert to binary cvThreshold(grayImage, bwImage, 100, 255, CV_THRESH_BINARY | CV_THRESH_OTSU); //eliminate small unnecessary pixel areas //bwImage is a pointer, so no need to reuse findCountours int findCountours = bwareaopen_(bwImage, 100); //find contours of obstacles in image cvFindContours(bwImage, grayStorage, &contours); cvZero( bwImage ); //redraw clean contours for( CvSeq* c=contours; c!=NULL; c=c->h_next) { cvDrawContours(bwImage, c, cvScalarAll(255), cvScalarAll(255), 8); //ignore obstacles/contours with are less than 100 pixels or bigger than 100000 pixels if ((cvContourArea(c, CV_WHOLE_SEQ) >= 60) && (cvContourArea(c, CV_WHOLE_SEQ) <= 100000)) { cout << "Contour area: " << cvContourArea(c, CV_WHOLE_SEQ) << endl; //area in pixels //find the x,y coordinate of the center of a contour cvMoments(c, &moment, 0); //centroid/moment of the contour/obstacle cout << "Contour center in x,y: " << moment.m10/moment.m00 << ", " << moment.m01/moment.m00 << endl; //The distance formula calculated by plotting points is given by: // Xc/Zc = Xp-cc(1)/Fc(1) // Yc/Zc = Yp-cc(2)/Fc(2) //For boat one Yc = 4.5 inches = 0.0635 meters //These formulas only work for 640X480 images // x,y coordinates of the obstacle from the bottom center of the image //Ignore everything less than 0.3 meters apart (anything too close to the boat) zObstacleDistance = 5*(yObstacleDistance*619.33108)/(X - 
(moment.m01/moment.m00)); xObstacleDistance = 5*zObstacleDistance*((moment.m10/moment.m00)-324.36738)/618.62586; //copy data to be published xWaypoint_msg.data = xObstacleDistance; zWaypoint_msg.data = zObstacleDistance; //publish data Xwaypoint_info_pub.publish(xWaypoint_msg); Zwaypoint_info_pub.publish(zWaypoint_msg); //try to ignore obstacle that are too close. Robot shall tell operator if there is //a problem with a close by obstacle //obstacle distance obstacleDistance = sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2) + pow(zObstacleDistance,2)); //Just use the 2D angle obstacleHeading = tan((zObstacleDistance/xObstacleDistance)*PI/180); cout << "Obstacle polar coordinates: " << endl; cout << "z: " << zObstacleDistance << " x: " << xObstacleDistance << endl; cout << "Distance (meters) " << obstacleDistance << endl; cout << "Direction (degrees): " << obstacleHeading << endl << endl; } } /**************************************************************************/ //deal with memory management. How do I get read of the arrays and pointers I am not using inside the callback function?????? try { //fprintf(stderr,"\n boatFront\n"); cvShowImage("Boat Front", backUpImage); //cvShowImage("Color Segment", backUpImage); //cvShowImage("Obstacles", bwImage); } catch (sensor_msgs::CvBridgeException& e) { ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str()); } }
/* Exact ("precise") distance transform.
 * src must be 8uC1 (zero pixels are the "feature" set), dst must be 32fC1 and the
 * same size. The transform runs in two separable passes over one shared scratch
 * buffer, then cvPow(dst, dst, 0.5) converts the accumulated squared distances
 * into Euclidean distances.
 *
 * Stage 1 (per column): run-length scan up and down each column producing, for
 * every pixel, the squared vertical distance to the nearest zero pixel.
 * Stage 2 (per row): minimize d[p] + (q-p)^2 over each row via the
 * lower-envelope-of-parabolas construction (v = parabola apexes, z = envelope
 * boundaries) — the intersection formula on `s` below is exactly the parabola
 * intersection computed with the precomputed inverse table.
 *
 * Both stages parallelize over OpenMP threads; each thread indexes a disjoint
 * slice of the scratch buffer via cvGetThreadNum(). */
static void
icvTrueDistTrans( const CvMat* src, CvMat* dst )
{
    CvMat* buffer = 0;

    CV_FUNCNAME( "cvDistTransform2" );

    __BEGIN__;

    int i, m, n;
    int sstep, dstep;
    const float inf = 1e6f;              // sentinel "infinite" distance
    int thread_count = cvGetNumThreads();
    int pass1_sz, pass2_sz;

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( CV_MAT_TYPE(src->type) != CV_8UC1 ||
        CV_MAT_TYPE(dst->type) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat,
        "The input image must have 8uC1 type and the output one must have 32fC1 type" );

    m = src->rows;
    n = src->cols;

    // Size the single scratch buffer for whichever pass needs more floats.
    // (see stage 1 below):
    // sqr_tab: 2*m, sat_tab: 3*m + 1, d: m*thread_count,
    pass1_sz = src->rows*(5 + thread_count) + 1;
    // (see stage 2):
    // sqr_tab & inv_tab: n each; f & v: n*thread_count each; z: (n+1)*thread_count
    pass2_sz = src->cols*(2 + thread_count*3) + thread_count;
    CV_CALL( buffer = cvCreateMat( 1, MAX(pass1_sz, pass2_sz), CV_32FC1 ));

    sstep = src->step;                    // byte step for the 8u source
    dstep = dst->step / sizeof(float);    // element step for the 32f destination

    // stage 1: compute 1d distance transform of each column
    {
        float* sqr_tab = buffer->data.fl;          // i -> i*i (capped at inf)
        int* sat_tab = (int*)(sqr_tab + m*2);      // saturating-subtract table
        const int shift = m*2;

        for( i = 0; i < m; i++ )
            sqr_tab[i] = (float)(i*i);
        for( i = m; i < m*2; i++ )
            sqr_tab[i] = inf;
        // sat_tab[x + shift] == max(x, 0), used to clamp the downward scan below.
        for( i = 0; i < shift; i++ )
            sat_tab[i] = 0;
        for( ; i <= m*3; i++ )
            sat_tab[i] = i - shift;

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
        for( i = 0; i < n; i++ )
        {
            const uchar* sptr = src->data.ptr + i + (m-1)*sstep;  // bottom of column i
            float* dptr = dst->data.fl + i;
            int* d = (int*)(sat_tab + m*3+1+m*cvGetThreadNum());  // per-thread column scratch
            int j, dist = m-1;

            // upward pass: distance to nearest zero pixel below (branch-free
            // reset — the mask is 0 at zero pixels, -1 otherwise)
            for( j = m-1; j >= 0; j--, sptr -= sstep )
            {
                dist = (dist + 1) & (sptr[0] == 0 ? 0 : -1);
                d[j] = dist;
            }

            // downward pass: combine with distance from above and store squared value
            dist = m-1;
            for( j = 0; j < m; j++, dptr += dstep )
            {
                dist = dist + 1 - sat_tab[dist + 1 - d[j] + shift];
                d[j] = dist;
                dptr[0] = sqr_tab[dist];
            }
        }
    }

    // stage 2: compute modified distance transform for each row
    {
        float* inv_tab = buffer->data.fl;     // i -> 0.5/i (intersection divisor)
        float* sqr_tab = inv_tab + n;         // i -> i*i

        inv_tab[0] = sqr_tab[0] = 0.f;
        for( i = 1; i < n; i++ )
        {
            inv_tab[i] = (float)(0.5/i);
            sqr_tab[i] = (float)(i*i);
        }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count), schedule(dynamic)
#endif
        for( i = 0; i < m; i++ )
        {
            float* d = (float*)(dst->data.ptr + i*dst->step);
            // per-thread scratch: f = row values, z = envelope boundaries, v = apex indices
            float* f = sqr_tab + n + (n*3+1)*cvGetThreadNum();
            float* z = f + n;
            int* v = (int*)(z + n + 1);
            int p, q, k;

            v[0] = 0;
            z[0] = -inf;
            z[1] = inf;
            f[0] = d[0];

            // build the lower envelope of the parabolas q -> f[q] + (x-q)^2
            for( q = 1, k = 0; q < n; q++ )
            {
                float fq = d[q];
                f[q] = fq;

                for(;;k--)
                {
                    p = v[k];
                    // intersection abscissa of parabolas at p and q
                    float s = (fq + sqr_tab[q] - d[p] - sqr_tab[p])*inv_tab[q - p];
                    if( s > z[k] )
                    {
                        k++;
                        v[k] = q;
                        z[k] = s;
                        z[k+1] = inf;
                        break;
                    }
                }
            }

            // evaluate the envelope: for each q pick the parabola owning that interval
            for( q = 0, k = 0; q < n; q++ )
            {
                while( z[k+1] < q )
                    k++;
                p = v[k];
                d[q] = sqr_tab[abs(q - p)] + f[p];
            }
        }
    }

    // dst currently holds squared distances; take the square root in place.
    cvPow( dst, dst, 0.5 );

    __END__;

    cvReleaseMat( &buffer );
}
void AAM_Train::shape_pca() { int dimention=2; double resudial=0.98; int ptsNum=shape[0]->ptsNum*dimention; CvMat *pData=cvCreateMat(shapeNum,ptsNum,CV_64FC1); for (int i=0;i<shapeNum;i++) { for (int j=0;j<ptsNum;j++) { //CV_MAT_ELEM(*pData,double,i,j)=shape[i]->ptsForMatlab[j]; //here,we keep the shape in the same scale with the meanshape CV_MAT_ELEM(*pData,double,i,j)=shape[i]->ptsForMatlab[j]; } } s_mean = cvCreateMat(1, ptsNum, CV_64FC1); s_value = cvCreateMat(1, min(shapeNum,ptsNum), CV_64FC1); CvMat *s_PCAvec = cvCreateMat( min(shapeNum,ptsNum), ptsNum, CV_64FC1); cvCalcPCA( pData, s_mean, s_value, s_PCAvec, CV_PCA_DATA_AS_ROW ); double sumEigVal=0; for (int i=0;i<s_value->cols;i++) { sumEigVal+=CV_MAT_ELEM(*s_value,double,0,i); } double sumCur=0; for (int i=0;i<s_value->cols;i++) { sumCur+=CV_MAT_ELEM(*s_value,double,0,i); if (sumCur/sumEigVal>=resudial) { shape_dim=i+1; break; } } //if consider global transform, we will add another 4 shape vector and orthamized? if (isGlobaltransform) { s_vec=cvCreateMat(shape_dim+4,ptsNum,CV_64FC1); for (int i=0;i<shape_dim;i++) { for (int j=0;j<ptsNum;j++) { CV_MAT_ELEM(*s_vec,double,i,j)=CV_MAT_ELEM(*s_PCAvec,double,i,j); } } //add the four shape vectors for (int j=0;j<ptsNum;j++) { CV_MAT_ELEM(*s_vec,double,shape_dim+3,j)=meanShape->ptsForMatlab[j]; if (j<ptsNum/2) { CV_MAT_ELEM(*s_vec,double,shape_dim+2,j)=-meanShape->ptsForMatlab[ptsNum/2+j]; CV_MAT_ELEM(*s_vec,double,shape_dim+1,j)=1; CV_MAT_ELEM(*s_vec,double,shape_dim+0,j)=0; } else { CV_MAT_ELEM(*s_vec,double,shape_dim+2,j)=meanShape->ptsForMatlab[j-ptsNum/2]; CV_MAT_ELEM(*s_vec,double,shape_dim+1,j)=0; CV_MAT_ELEM(*s_vec,double,shape_dim+0,j)=1; } }
/* Wrapper function for distance transform group.
 *
 * Validates arguments, normalizes the mask size for the requested metric, and
 * dispatches to one of: the precise transform (icvTrueDistTrans), an IPP
 * routine when available, the 8u L1 approximation (icvDistanceATS_L1_8u), or
 * the generic 3x3/5x5 chamfer implementations (with or without the nearest-
 * component "labels" output). Error/exit paths flow through the CV_FUNCNAME/
 * __BEGIN__/__END__ macros, so all temporaries are released after __END__. */
CV_IMPL void
cvDistTransform( const void* srcarr, void* dstarr,
                 int distType, int maskSize,
                 const float *mask,
                 void* labelsarr )
{
    CvMat* temp = 0;       // bordered integer work buffer for the chamfer passes
    CvMat* src_copy = 0;   // scratch copy used only by the labeled variant
    CvMemStorage* st = 0;  // contour storage for labeling

    CV_FUNCNAME( "cvDistTransform" );

    __BEGIN__;

    float _mask[5] = {0};  // metric weights (float form)
    int _imask[3];         // rounded weights for the integer IPP entry points
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvMat lstub, *labels = (CvMat*)labelsarr;
    CvSize size;
    CvIPPDistTransFunc ipp_func = 0;
    CvIPPDistTransFunc2 ipp_inp_func = 0;

    CV_CALL( src = cvGetMat( src, &srcstub ));
    CV_CALL( dst = cvGetMat( dst, &dststub ));

    // dst must be 32fC1, except the special case 8uC1 + L1 + no labels.
    if( !CV_IS_MASK_ARR( src ) || CV_MAT_TYPE( dst->type ) != CV_32FC1 &&
        (CV_MAT_TYPE(dst->type) != CV_8UC1 ||
        distType != CV_DIST_L1 || labels) )
        CV_ERROR( CV_StsUnsupportedFormat,
        "source image must be 8uC1 and the distance map must be 32fC1 "
        "(or 8uC1 in case of simple L1 distance transform)" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes,
        "the source and the destination images must be of the same size" );

    if( maskSize != CV_DIST_MASK_3 && maskSize != CV_DIST_MASK_5 &&
        maskSize != CV_DIST_MASK_PRECISE )
        CV_ERROR( CV_StsBadSize, "Mask size should be 3 or 5 or 0 (presize)" );

    // C/L1 metrics are exact with a 3x3 mask (5x5 when labels are requested);
    // L2 with labels needs the 5x5 mask.
    if( distType == CV_DIST_C || distType == CV_DIST_L1 )
        maskSize = !labels ? CV_DIST_MASK_3 : CV_DIST_MASK_5;
    else if( distType == CV_DIST_L2 && labels )
        maskSize = CV_DIST_MASK_5;

    if( maskSize == CV_DIST_MASK_PRECISE )
    {
        // Exact Euclidean transform; metric/mask arguments are irrelevant here.
        CV_CALL( icvTrueDistTrans( src, dst ));
        EXIT;
    }

    if( labels )
    {
        CV_CALL( labels = cvGetMat( labels, &lstub ));
        if( CV_MAT_TYPE( labels->type ) != CV_32SC1 )
            CV_ERROR( CV_StsUnsupportedFormat,
            "the output array of labels must be 32sC1" );

        if( !CV_ARE_SIZES_EQ( labels, dst ))
            CV_ERROR( CV_StsUnmatchedSizes,
            "the array of labels has a different size" );

        if( maskSize == CV_DIST_MASK_3 )
            CV_ERROR( CV_StsNotImplemented,
            "3x3 mask can not be used for \"labeled\" distance transform. Use 5x5 mask" );
    }

    // Fill _mask with the chamfer weights for the chosen metric/mask combo.
    if( distType == CV_DIST_C || distType == CV_DIST_L1 || distType == CV_DIST_L2 )
    {
        icvGetDistanceTransformMask( (distType == CV_DIST_C ? 0 :
            distType == CV_DIST_L1 ? 1 : 2) + maskSize*10, _mask );
    }
    else if( distType == CV_DIST_USER )
    {
        if( !mask )
            CV_ERROR( CV_StsNullPtr, "" );

        memcpy( _mask, mask, (maskSize/2 + 1)*sizeof(float));
    }

    // Pick an IPP entry point when there is no labels output to produce.
    if( !labels )
    {
        if( CV_MAT_TYPE(dst->type) == CV_32FC1 )
            ipp_func = (CvIPPDistTransFunc)(maskSize == CV_DIST_MASK_3 ?
                icvDistanceTransform_3x3_8u32f_C1R_p : icvDistanceTransform_5x5_8u32f_C1R_p);
        else if( src->data.ptr != dst->data.ptr )
            ipp_func = (CvIPPDistTransFunc)icvDistanceTransform_3x3_8u_C1R_p;
        else
            ipp_inp_func = icvDistanceTransform_3x3_8u_C1IR_p;  // in-place variant
    }

    size = cvGetMatSize(src);
    if( (ipp_func || ipp_inp_func) && src->cols >= 4 && src->rows >= 2 )
    {
        _imask[0] = cvRound(_mask[0]);
        _imask[1] = cvRound(_mask[1]);
        _imask[2] = cvRound(_mask[2]);

        if( ipp_func )
        {
            IPPI_CALL( ipp_func( src->data.ptr, src->step,
                    dst->data.fl, dst->step, size,
                    CV_MAT_TYPE(dst->type) == CV_8UC1 ?
                    (void*)_imask : (void*)_mask ));
        }
        else
        {
            IPPI_CALL( ipp_inp_func( src->data.ptr, src->step, size, _imask ));
        }
    }
    else if( CV_MAT_TYPE(dst->type) == CV_8UC1 )
    {
        // Fast integer L1 approximation straight into the 8u destination.
        CV_CALL( icvDistanceATS_L1_8u( src, dst ));
    }
    else
    {
        // Generic chamfer path: bordered int buffer sized for the mask radius.
        int border = maskSize == CV_DIST_MASK_3 ? 1 : 2;
        CV_CALL( temp = cvCreateMat( size.height + border*2,
                                     size.width + border*2, CV_32SC1 ));

        if( !labels )
        {
            CvDistTransFunc func = maskSize == CV_DIST_MASK_3 ?
                icvDistanceTransform_3x3_C1R :
                icvDistanceTransform_5x5_C1R;

            func( src->data.ptr, src->step, temp->data.i, temp->step,
                  dst->data.fl, dst->step, size, _mask );
        }
        else
        {
            CvSeq *contours = 0;
            CvPoint top_left = {0,0}, bottom_right = {size.width-1,size.height-1};
            int label;

            CV_CALL( st = cvCreateMemStorage() );
            CV_CALL( src_copy = cvCreateMat( size.height, size.width, src->type ));
            // Label every connected component of zero pixels by drawing its
            // contour (and holes) with a distinct index into `labels`.
            cvCmpS( src, 0, src_copy, CV_CMP_EQ );
            cvFindContours( src_copy, st, &contours, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
            cvZero( labels );
            for( label = 1; contours != 0; contours = contours->h_next, label++ )
            {
                CvScalar area_color = cvScalarAll(label);
                cvDrawContours( labels, contours, area_color, area_color, -255, -1, 8 );
            }

            cvCopy( src, src_copy );
            // White frame so the scan never walks off the image.
            cvRectangle( src_copy, top_left, bottom_right, cvScalarAll(255), 1, 8 );

            icvDistanceTransformEx_5x5_C1R( src_copy->data.ptr, src_copy->step,
                        temp->data.i, temp->step, dst->data.fl, dst->step,
                        labels->data.i, labels->step, size, _mask );
        }
    }

    __END__;

    cvReleaseMat( &temp );
    cvReleaseMat( &src_copy );
    cvReleaseMemStorage( &st );
}
/**
 * Builds an aligned and augmented face database in place.
 *
 * Reads feature-point coordinates (one file name plus 8 doubles per line) from
 * coordsFilename, keeps the entries from startFile..endFile whose image files
 * exist under filePath, then for noIterations rounds: solves a 4-parameter
 * similarity transform (via pseudoInverse) aligning each face to the current
 * reference, rewrites each image file with the warped result, and updates the
 * reference from the average coordinates re-aligned to the hard-coded standard
 * face. Finally, each aligned face is cropped to initialSize (+border), and
 * NO_VARIATIONS randomized variants (scale / rotation / translation) are saved
 * as "<name>_NNN.jpg" while the original file is removed.
 *
 * NOTE(review): several suspicious spots are flagged inline rather than fixed:
 * the fscanf "%s" argument, the averaging divisor, and an address-of-temporary.
 */
void faceDbCreator(const char filePath[50],const char coordsFilename[100],
                   const int startFile,const int endFile,
                   const int noIterations,const int border){
    /**Number of Feature Points used in aligning images.**/
    const int noFeaturePoints = 4;
    const int initialSize = 38;          // side of the cropped (pre-border) face patch
    int i,j,k,iteration;
    /**No of files from DB added for alignment**/
    int noFiles = 0;
    double xr = 0;                        // transformed x (reused as crop offset later)
    double yr = 0;                        // transformed y (reused as crop offset later)
    int x,y;                              // NOTE(review): unused
    char filePathCopy[100];               // NOTE(review): unused
    /**Coords of the standard face with respect to initialSize.
       The CvMat header is pointed at the stack array below (no cvReleaseMat
       of the data is needed; only the headers are released at the end).**/
    CvMat *stdCoords = cvCreateMat(noFeaturePoints*2,1, CV_64FC1);
    double stdCoordsData[] = {5+border,6+border,32+border,
                              6+border,18+border,15+border,
                              18+border,25+border};
    stdCoords->data.db = stdCoordsData;
    /**Average Coords of the faces aligned so far**/
    double avgData[noFeaturePoints*2];
    CvMat *avgMat = cvCreateMat(noFeaturePoints*2,1, CV_64FC1);
    avgMat->data.db = avgData;
    /**Coords to which other coordinates are aligned to**/
    double testData[noFeaturePoints*2];
    CvMat *testMat = cvCreateMat(noFeaturePoints*2,1, CV_64FC1);
    testMat->data.db = testData;
    cvCopy(stdCoords,testMat);            // reference starts as the standard face
    double tempCoords[noFeaturePoints*2];
    /**Coords of all the images in the database (VLA-sized by the file range)**/
    CvMat* coords[endFile-startFile+1];
    double coordsData[endFile-startFile+1][noFeaturePoints*8];
    /**Face DB image file names**/
    char fileNames[endFile-startFile+1][100];
    char tempFileName[100];
    char tempStr[50];                     // token buffer, later reused as "_NNN.jpg" suffix
    IplImage *img = NULL;
    IplImage *dst = NULL;
    FILE* coordsFile = fopen(coordsFilename,"r+");
    FILE* t = NULL;
    if (coordsFile){
        // i counts from -(startFile-1) so entries before startFile are parsed but skipped.
        for (i=-startFile+1;i<=endFile-startFile;++i){
            if(!feof(coordsFile)){
                // NOTE(review): "%s" should receive tempStr, not &tempStr — same
                // address for a char array, but a type mismatch; confirm/clean up.
                fscanf(coordsFile,"%s %lf %lf %lf %lf %lf %lf %lf %lf",&tempStr,
                       &tempCoords[0],&tempCoords[1],&tempCoords[2],
                       &tempCoords[3],&tempCoords[4],&tempCoords[5],
                       &tempCoords[6],&tempCoords[7]);
                /**Skip the coords upto startImage**/
                if (i>=0){
                    strcpy(tempFileName,filePath);
                    strcat(tempFileName,tempStr);
                    /**Check whether the file exists**/
                    if (t=fopen(tempFileName,"r")){
                        fclose(t);
                        strcpy(fileNames[noFiles],tempFileName);
                        coords[noFiles] = cvCreateMat(noFeaturePoints*2,4, CV_64FC1);
                        // Expand the 8 raw coords into the 2Nx4 design matrix,
                        // then alias the matrix header onto the stack storage.
                        faceDbCreatorFillData(coordsData[noFiles],tempCoords,noFeaturePoints);
                        coords[noFiles]->data.db = coordsData[noFiles];
                        ++noFiles;
                    }
                }
            }
            else{
                noFiles = i-1;   // ran out of lines early
                break;
            }
        }
        fclose(coordsFile);
        if (!noFiles){
            printf("Face DB Creator Error: No File To Process\n");
            exit(EXIT_FAILURE);
        }
    }
    else {
        printf("Face DB Creator Error: Could Not Open Coords File\n");
        exit(EXIT_FAILURE);
    }
    /**PseudoInverse result: the 4 similarity parameters (a, b, tx, ty)**/
    CvMat *temp2 = cvCreateMat(4,1,CV_64FC1);
    double tempData2[4];
    temp2->data.db = tempData2;
    for (iteration=0;iteration<noIterations;++iteration){
        cvSetZero(avgMat);   // zeroes avgData via the aliased header
        for (i=0;i<noFiles;++i){
            // Least-squares similarity transform mapping this face onto testMat.
            pseudoInverse(coords[i],testMat,temp2);
            for (j=0;j<noFeaturePoints;++j){
                // Apply [a -b; b a] rotation/scale plus (tx, ty) translation.
                xr = coordsData[i][j*8]*temp2->data.db[0]
                     -coordsData[i][j*8+4]*temp2->data.db[1]+temp2->data.db[2];
                yr = coordsData[i][j*8]*temp2->data.db[1]
                     +coordsData[i][j*8+4]*temp2->data.db[0]+temp2->data.db[3];
                // Write the transformed point back into the design-matrix layout.
                coordsData[i][j*8] = xr;
                coordsData[i][j*8+5] = xr;
                coordsData[i][j*8+1] = -yr;
                coordsData[i][j*8+4] = yr;
                avgData[j*2] += xr;
                avgData[j*2+1] += yr;
            }
            img = cvLoadImage(fileNames[i], CV_LOAD_IMAGE_GRAYSCALE);
            dst = cvCreateImage(cvSize(initialSize+
                2*border,initialSize+2*border), img->depth,img->nChannels);
            cvSetZero(dst);
            double a = temp2->data.db[0];
            double b = temp2->data.db[1];
            double det = a*a+b*b;        // determinant of [a -b; b a]
            double tx = temp2->data.db[2];
            double ty = temp2->data.db[3];
            /**Transform the image: inverse-map each destination pixel back to
               the source (nearest-neighbor), leaving out-of-range pixels black.**/
            for (j=0;j<dst->height;++j){
                for (k=0;k<dst->width;++k){
                    xr = ((k-tx)*a+(j-ty)*b)/det;
                    yr = ((k-tx)*-b+(j-ty)*a)/det;
                    if ((int)xr>=0 && (int)xr <img->width && (int)yr>=0
                        && (int)yr<img->height){
                        *((unsigned char*)(dst->imageData)+j*dst->widthStep+k)=
                            *((unsigned char*)(img->imageData)+
                              (int)yr*img->widthStep+(int)xr);
                    }
                }
            }
            // The source file is overwritten with its aligned version.
            cvSaveImage(fileNames[i],dst);
            cvReleaseImage(&img);
            cvReleaseImage(&dst);
        }
        /**Average of the transformations performed so far.
           NOTE(review): divides by the full requested range, not by noFiles —
           if some files were missing this under-weights the average; confirm.**/
        for (j=0;j<noFeaturePoints*2;++j){
            avgData[j] /= endFile-startFile+1;
        }
        /**Re-align the average shape to the standard coords to get the next
           iteration's reference (testData).**/
        CvMat* tempMat = cvCreateMat(noFeaturePoints*2,4, CV_64FC1);
        double tempMatData[noFeaturePoints*8];
        tempMat->data.db = tempMatData;
        faceDbCreatorFillData(tempMatData,avgData,noFeaturePoints);
        pseudoInverse(tempMat,stdCoords,temp2);
        for (j=0;j<noFeaturePoints;++j){
            testData[j*2] = avgData[j*2]*temp2->data.db[0]-
                            avgData[j*2+1]*temp2->data.db[1]+
                            temp2->data.db[2];
            testData[j*2+1] = avgData[j*2]*temp2->data.db[1]+
                              avgData[j*2+1]*temp2->data.db[0]+
                              temp2->data.db[3];
        }
        cvReleaseMat(&tempMat);
    }
    IplImage *img8U,*img64F;
    CvRect *cropArea;                     // NOTE(review): unused
    IplImage *finalImage32F = cvCreateImage(cvSize(CROPPED_WIDTH,
        CROPPED_HEIGHT),IPL_DEPTH_32F,1);
    IplImage *finalImage8U = cvCreateImage(cvSize(CROPPED_WIDTH,
        CROPPED_HEIGHT),IPL_DEPTH_8U,1);
    IplImage *transformImage64F;
    IplImage *transformImage32F;
    IplImage *croppedImage32F = cvCreateImage(cvSize(initialSize,
        initialSize),IPL_DEPTH_32F,1);
    IplImage *croppedImage64F = cvCreateImage(cvSize(initialSize,
        initialSize),IPL_DEPTH_64F,1);
    // Mask for lighting correction, same size as the cropped patch.
    IplImage* mask = cvCreateImage(cvGetSize (croppedImage64F),IPL_DEPTH_8U,1);
    maskGenerator(mask);
    /**Random transformations**/
    double scale = 0;
    double rotate = 0;
    double translateX = 0;
    double translateY = 0;
    // tempStr becomes the "_NNN.jpg" suffix template; digits filled per variation.
    tempStr[0] = '_';
    tempStr[4] = '.'; tempStr[5] = 'j'; tempStr[6] = 'p'; tempStr[7] = 'g';
    tempStr[8] = '\0';
    /**Random Number Generator**/
    CvRNG rg;
    for (i=0;i<noFiles;++i){
        img8U = cvLoadImage(fileNames[i], CV_LOAD_IMAGE_GRAYSCALE);
        img64F = cvCreateImage(cvGetSize(img8U), IPL_DEPTH_64F,1);
        cvConvertScale(img8U,img64F);
        cvReleaseImage(&img8U);
        remove(fileNames[i]);   // aligned original is deleted; variants replace it
        // Crop origin: aligned first feature point relative to the standard face.
        xr = coordsData[i][0]-stdCoordsData[0]+ border;
        yr = coordsData[i][4]-stdCoordsData[1]+ border;
        cvSetImageROI(img64F,cvRect(cvRound(xr),cvRound(yr),initialSize,
            initialSize));
        cvCopy(img64F,croppedImage64F);
        /**Creating variations for each image**/
        for (j=0;j<NO_VARIATIONS;++j){
            lightingCorrection(croppedImage64F,mask);
            // Reseed per (file, variation) from wall-clock time.
            rg = cvRNG(time(0)*1000*(i+20)*(j+30));
            cvConvertScale(croppedImage64F,croppedImage32F);
            cvResize(croppedImage32F,finalImage32F);
            cvConvertScale(finalImage32F,finalImage8U);
            // Encode variation index j as three ASCII digits in the suffix.
            tempStr[1] = (j/100)%10+48;
            tempStr[2] = (j/10)%10+48;tempStr[3]=j%10+48;
            strncpy(tempFileName,fileNames[i],strlen(fileNames[i])-4);
            tempFileName[strlen(fileNames[i])-4] ='\0';
            strcat(tempFileName,tempStr);
            cvSaveImage(tempFileName,finalImage8U);
            // Prepare croppedImage64F for the NEXT variation with one random transform.
            switch (cvRandInt(&rg)%3){
                /**Scaling**/
                case 0:
                    if (cvRandInt(&rg)%2)
                        scale = cvRandReal(&rg)*MAX_SCALE*
                                initialSize/CROPPED_WIDTH;
                    else
                        scale = cvRandReal(&rg)*MIN_SCALE*
                                initialSize/CROPPED_HEIGHT;
                    transformImage64F = cvCreateImage(
                        cvSize(cvRound(initialSize-2*scale),
                        cvRound(initialSize-2*scale)),
                        IPL_DEPTH_64F,1);
                    transformImage32F = cvCreateImage(
                        cvSize(cvRound(initialSize-2*scale),
                        cvRound(initialSize-2*scale)),
                        IPL_DEPTH_32F,1);
                    // Shrink the ROI symmetrically, then resize back up.
                    cvSetImageROI(img64F,cvRect(cvRound(xr+scale),cvRound(yr+scale),
                        cvRound(initialSize-2*scale),cvRound(initialSize-2*scale)));
                    cvCopy(img64F,transformImage64F);
                    cvConvertScale(transformImage64F,transformImage32F);
                    cvResize(transformImage32F,croppedImage32F);
                    cvConvertScale(croppedImage32F,croppedImage64F);
                    cvReleaseImage(&transformImage64F);
                    cvReleaseImage(&transformImage32F);
                    break;
                /**Rotation**/
                case 1:
                    if (cvRandInt(&rg)%2)
                        rotate = cvRandReal(&rg)*MAX_ROTATE;
                    else
                        rotate = cvRandReal(&rg)*MIN_ROTATE;
                    cvResetImageROI(img64F);
                    transformImage64F = cvCreateImage(cvGetSize(img64F),
                        IPL_DEPTH_64F,1);
                    // NOTE(review): takes the address of a temporary CvPoint2D64f —
                    // non-standard (MSVC extension); confirm transformRotate's signature.
                    transformRotate(img64F,transformImage64F,
                        &cvPoint2D64f(xr+initialSize/2,yr+initialSize/2),rotate*M_PI/180);
                    cvSetImageROI(transformImage64F,
                        cvRect(xr,yr,initialSize,initialSize));
                    cvCopy(transformImage64F,croppedImage64F);
                    cvReleaseImage(&transformImage64F);
                    break;
                default:
                    /**Translation: pick one of four max/min combinations.**/
                    if (cvRandInt(&rg)%2){
                        if (cvRandInt(&rg)%2){
                            translateX = cvRandReal(&rg)*MAX_TRANSLATE*
                                         initialSize/CROPPED_WIDTH;
                            translateY = cvRandReal(&rg)*MAX_TRANSLATE*
                                         initialSize/CROPPED_HEIGHT;
                        }
                        else{
                            translateX = cvRandReal(&rg)*MIN_TRANSLATE*
                                         initialSize/CROPPED_WIDTH;
                            translateY = cvRandReal(&rg)*MIN_TRANSLATE*
                                         initialSize/CROPPED_HEIGHT;
                        }
                    }
                    else{
                        if (cvRandInt(&rg)%2){
                            translateX = cvRandReal(&rg)*MAX_TRANSLATE*
                                         initialSize/CROPPED_WIDTH;
                            translateY = cvRandReal(&rg)*MIN_TRANSLATE*
                                         initialSize/CROPPED_HEIGHT;
                        }
                        else{
                            translateX = cvRandReal(&rg)*MIN_TRANSLATE*
                                         initialSize/CROPPED_WIDTH;
                            translateY = cvRandReal(&rg)*MAX_TRANSLATE*
                                         initialSize/CROPPED_HEIGHT;
                        }
                    }
                    cvSetImageROI(img64F,cvRect(cvRound(xr+translateX),
                        cvRound(yr+translateY),initialSize,initialSize));
                    cvCopy(img64F,croppedImage64F);
            }
        }
        cvReleaseImage(&img64F);
        cvReleaseMat(&coords[i]);
    }
    // Release all shared work images and matrix headers.
    cvReleaseImage(&finalImage8U);
    cvReleaseImage(&finalImage32F);
    cvReleaseImage(&croppedImage32F);
    cvReleaseImage(&croppedImage64F);
    cvReleaseMat(&stdCoords);
    cvReleaseMat(&testMat);
    cvReleaseMat(&avgMat);
    cvReleaseMat(&temp2);
}
/*void pkmGaussianMixtureModel::getLikelihood(int x, int y)
{

}
*/
// Accumulates the best EM model's weighted likelihoods over a rows x cols grid
// into the caller-supplied 8-bit `map` (one byte per pixel, row pitch
// `widthstep`, defaulting to `cols` when 0), while logging model parameters
// (cluster count, likelihood, BIC, and per-cluster mean/covar/weight) to
// `filePtr`. Also records the index of the heaviest cluster in `bestCluster`.
// No-op unless a model has been fit (bModeled).
void pkmGaussianMixtureModel::getLikelihoodMap(int rows, int cols, unsigned char *map, ofstream &filePtr, int widthstep)
{
    if(widthstep == 0)
        widthstep = cols;
    if(!bModeled)
        return;

    // Pull parameters of the selected (best) EM model.
    CvEM myModel = emModel[bestModel];
    const CvMat **modelCovs = myModel.get_covs();
    const CvMat *modelMus = myModel.get_means();
    const CvMat *modelWeights = myModel.get_weights();
    int numClusters = myModel.get_nclusters();

    // Column vectors reused for every grid point / cluster.
    CvMat *pts = cvCreateMat(m_nVariables, 1, CV_64FC1);
    CvMat *mean = cvCreateMat(m_nVariables, 1, CV_64FC1);
    double p = 0;        // NOTE(review): unused
    double weight;
    double max = 0;      // NOTE(review): unused
    double prob;

    filePtr << "clusters: " << numClusters << "\n";
    filePtr << "likelihood: " << m_Likelihood << "\n";
    filePtr << "BIC: " << m_BIC << "\n";
    //printf("clusters: %d\n", numClusters);

    float best_weight = 0;
    bestCluster = 0;

    for (int k = 0; k < numClusters; k++)
    {
        const CvMat * covar = modelCovs[k];
        //printf("covar: (%f, %f, %f, %f)\n", covar->data.ptr[0],
        //       covar->data.ptr[1], covar->data.ptr[2], covar->data.ptr[3]);

        // Copy this cluster's first two mean components into the column vector.
        // NOTE(review): only components 0 and 1 are set — assumes
        // m_nVariables == 2; confirm for higher-dimensional models.
        cvmSet(mean, 0, 0, cvmGet(modelMus, k, 0));
        cvmSet(mean, 1, 0, cvmGet(modelMus, k, 1));

        weight = cvmGet(modelWeights, 0, k);
        // Track the heaviest mixture component.
        if (best_weight < weight) {
            best_weight = weight;
            bestCluster = k;
        }

        filePtr << "mean: " << cvmGet(modelMus, k, 0)*(double)m_nScale << " "
            << cvmGet(modelMus, k, 1)*(double)m_nScale << "\n";
        filePtr << "covar: " << cvmGet(covar, 0, 0) << "\n";
        filePtr << "weight: " << weight << "\n";

        // Add this cluster's scaled, weighted density at every grid point.
        // NOTE(review): += into unsigned char can wrap; confirm intended range.
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                cvmSet(pts, 0, 0, (double)j);
                cvmSet(pts, 1, 0, (double)i);
                prob = multinormalDistribution(pts, mean, covar);
                map[j+i*widthstep] += (int)((weight * prob)*(double)(rows*cols));
            }
        }
    }
    cvReleaseMat(&mean);
    cvReleaseMat(&pts);
}
bool KMeans2( const IplImage * pImg ,IplImage * pResult ,int sortFlag ,int nClusters ) { assert ( pImg != NULL && pImg -> nChannels == 1); // 创建样本矩阵, CV_32FC1 代表位浮点通道(灰度图像) CvMat * samples = cvCreateMat (( pImg -> width )* ( pImg -> height ),1, CV_32FC1 ); // 创建类别标记矩阵, CV_32SF1 代表位整型通道 CvMat * clusters = cvCreateMat (( pImg -> width )* ( pImg -> height ),1, CV_32SC1 ); // 创建类别中心矩阵 CvMat * centers = cvCreateMat ( nClusters , 1, CV_32FC1 ); // 将原始图像转换到样本矩阵 { int k = 0; CvScalar s ; for ( int i = 0; i < pImg -> width ; i ++) { for ( int j =0; j < pImg -> height ; j ++) { s . val [0] = ( float ) cvGet2D ( pImg , j , i ). val [0]; cvSet2D ( samples , k ++, 0, s ); } } } // 开始聚类,迭代次,终止误差 .0 cvKMeans2 ( samples , nClusters , clusters , cvTermCriteria ( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS ,100, 1.0), 1, 0, 0, centers ); // 无需排序直接输出时 if ( sortFlag == 0) { int k = 0; int val = 0; float step = 255 / (( float ) nClusters - 1); CvScalar s ; for ( int i = 0; i < pImg -> width ; i ++) { for ( int j = 0; j < pImg -> height ; j ++) { val = ( int ) clusters -> data . i [ k ++]; s . val [0] = 255- val * step ; // 这个是将不同类别取不同的像素值, cvSet2D ( pResult , j , i , s ); // 将每个像素点赋值 } } return TRUE ; } }
// Read the training data and train the network. void trainMachine() { int i; //The number of training samples. int train_sample_count; //The training data matrix. //Note that we are limiting the number of training data samples to 1000 here. //The data sample consists of two inputs and an output. That's why 3. //td es la matriz dinde se cargan las muestras float td[3000][7]; //Read the training file /* A sample file contents(say we are training the network for generating the mean given two numbers) would be: 5 12 16 14 10 5 7.5 8 10 9 5 4 4.5 12 6 9 */ FILE *fin; fin = fopen("train.txt", "r"); //Get the number of samples. fscanf(fin, "%d", &train_sample_count); printf("Found training file with %d samples...\n", train_sample_count); //Create the matrices //Input data samples. Matrix of order (train_sample_count x 2) CvMat* trainData = cvCreateMat(train_sample_count, 6, CV_32FC1); //Output data samples. Matrix of order (train_sample_count x 1) CvMat* trainClasses = cvCreateMat(train_sample_count, 1, CV_32FC1); //The weight of each training data sample. We'll later set all to equal weights. CvMat* sampleWts = cvCreateMat(train_sample_count, 1, CV_32FC1); //The matrix representation of our ANN. We'll have four layers. 
CvMat* neuralLayers = cvCreateMat(2, 1, CV_32SC1); CvMat trainData1, trainClasses1, neuralLayers1, sampleWts1; cvGetRows(trainData, &trainData1, 0, train_sample_count); cvGetRows(trainClasses, &trainClasses1, 0, train_sample_count); cvGetRows(trainClasses, &trainClasses1, 0, train_sample_count); cvGetRows(sampleWts, &sampleWts1, 0, train_sample_count); cvGetRows(neuralLayers, &neuralLayers1, 0, 2); //Setting the number of neurons on each layer of the ANN /* We have in Layer 1: 2 neurons (6 inputs) Layer 2: 3 neurons (hidden layer) Layer 3: 3 neurons (hidden layer) Layer 4: 1 neurons (1 output) */ cvSet1D(&neuralLayers1, 0, cvScalar(6)); //cvSet1D(&neuralLayers1, 1, cvScalar(3)); //cvSet1D(&neuralLayers1, 2, cvScalar(3)); cvSet1D(&neuralLayers1, 1, cvScalar(1)); //Read and populate the samples. for (i=0; i<train_sample_count; i++) fscanf(fin,"%f %f %f %f",&td[i][0],&td[i][1],&td[i][2],&td[i][3]); fclose(fin); //Assemble the ML training data. for (i=0; i<train_sample_count; i++) { //Input 1 cvSetReal2D(&trainData1, i, 0, td[i][0]); //Input 2 cvSetReal2D(&trainData1, i, 1, td[i][1]); cvSetReal2D(&trainData1, i, 2, td[i][2]); cvSetReal2D(&trainData1, i, 3, td[i][3]); cvSetReal2D(&trainData1, i, 4, td[i][4]); cvSetReal2D(&trainData1, i, 5, td[i][5]); //Output cvSet1D(&trainClasses1, i, cvScalar(td[i][6])); //Weight (setting everything to 1) cvSet1D(&sampleWts1, i, cvScalar(1)); } //Create our ANN. machineBrain.create(neuralLayers); //Train it with our data. //See the Machine learning reference at http://www.seas.upenn.edu/~bensapp/opencvdocs/ref/opencvref_ml.htm#ch_ann machineBrain.train( trainData, trainClasses, sampleWts, 0, CvANN_MLP_TrainParams( cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100000, 1.0 ), CvANN_MLP_TrainParams::BACKPROP, 0.01, 0.05 ) ); }
// Segment an image: run Canny to obtain an edge mask, then flood-fill every
// non-edge pixel region in two stages (gather the region average, then
// repaint the region with it).  srcarr and dstarr may alias; if they do not,
// src is first copied into dst and segmentation runs in place on dst.
CV_IMPL void
cvSegmentImage( const CvArr* srcarr, CvArr* dstarr,
                double canny_threshold, double ffill_threshold )
{
    CvMat* gray = 0;
    CvMat* canny = 0;
    void* stack = 0;

    CV_FUNCNAME( "cvSegmentImage" );

    __BEGIN__;

    CvMat srcstub, *src;
    CvMat dststub, *dst;
    CvMat* mask;
    CvSize size;
    CvPoint pt;
    // Symmetric lower/upper flood-fill tolerance.
    int ffill_lw_up = cvRound( fabs(ffill_threshold) );

    CV_CALL( src = cvGetMat( srcarr, &srcstub ));
    CV_CALL( dst = cvGetMat( dstarr, &dststub ));

    // Work in place on dst.
    if( src->data.ptr != dst->data.ptr )
    {
        CV_CALL( cvCopy( src, dst ));
        src = dst;
    }

    size = cvGetSize( src );

    CV_CALL( gray = cvCreateMat( size.height, size.width, CV_8UC1 ));
    CV_CALL( canny = cvCreateMat( size.height, size.width, CV_8UC1 ));
    // Worst case: every pixel simultaneously on the flood-fill stack.
    CV_CALL( stack = cvAlloc( size.width * size.height * sizeof(Seg)));

    cvCvtColor( src, gray, CV_BGR2GRAY );
    cvCanny( gray, canny, 0, canny_threshold, 5 );

    mask = canny; // a new name for new role

    // make a non-zero border.
    cvRectangle( mask, cvPoint(0,0), cvPoint(size.width-1,size.height-1), 1, 1 );

    // Flood-fill each pixel not yet marked as edge/visited.
    for( pt.y = 0; pt.y < size.height; pt.y++ )
    {
        for( pt.x = 0; pt.x < size.width; pt.x++ )
        {
            if( mask->data.ptr[mask->step*pt.y + pt.x] == 0 )
            {
                CvConnectedComp region;
                int avgVal[3] = { 0, 0, 0 };

                // BUGFIX: the original read "®ion" — a mangled "&region"
                // (HTML-entity corruption) that does not compile.
                icvSegmFloodFill_Stage1( src->data.ptr, src->step,
                                         mask->data.ptr, mask->step,
                                         size, pt, avgVal,
                                         ffill_lw_up, ffill_lw_up,
                                         &region, stack );

                icvSegmFloodFill_Stage2( src->data.ptr, src->step,
                                         mask->data.ptr, mask->step,
                                         size, avgVal,
                                         region.rect );
            }
        }
    }

    __END__;

    cvReleaseMat( &gray );
    cvReleaseMat( &canny );
    cvFree( &stack );
}
for (int i = (int)newObjects.size() - 1; i >= 0; --i) { if (newObjects[i].object.overlap(minScoreObject.object) > minScoreObject.object.area() * 0.8 && newObjects[i].object.area() > minScoreObject.object.area()) { foundbigger = true; minScoreObject = newObjects[i]; } <<<<<<< HEAD:vision/classifier.cpp ++where; } } printf("We used %d of them\n", where); cout << "kmeans starting..." << endl; CvMat *clusters = cvCreateMat(trainIpoints, 1, CV_32SC1); centers = cvCreateMat(NUM_CLUSTERS, SURF_SIZE, CV_32FC1); cvKMeans2( all_desc, // samples NUM_CLUSTERS, // clusters clusters, // labels cvTermCriteria( CV_TERMCRIT_EPS|CV_TERMCRIT_ITER, // End criteria 10, // Max iter 0.1), //accuracy 1, // attempts &rng, //rng 0, // flags centers, // centers NULL // compactness };
UINT WINAPI
//DWORD WINAPI
#elif defined(POSIX_SYS)
// using pthread
void *
#endif
// Background worker thread that detects chessboard grid lines: per frame it
// applies a derivative-of-Gaussian filter in X and Y, normalizes, runs
// non-maximum suppression, and extracts lines with the standard Hough
// transform.  'Param' is the owning ChessRecognition instance.
ChessRecognition::HoughLineThread(
#if defined(WINDOWS_SYS)
	LPVOID
#elif defined(POSIX_SYS)
	void *
#endif
	Param) {
	// Thread entry point that runs behind the main loop; the owning class
	// instance is passed through the thread parameter.
	ChessRecognition *_TChessRecognition = (ChessRecognition *)Param;
	_TChessRecognition->_HoughLineBased = new HoughLineBased();

	CvSeq *_TLineX, *_TLineY;
	// 1-D derivative-of-Gaussian kernel; _TDoGY is its transpose.
	double _TH[] = { -1, -7, -15, 0, 15, 7, 1 };

	CvMat _TDoGX = cvMat(1, 7, CV_64FC1, _TH);
	CvMat* _TDoGY = cvCreateMat(7, 1, CV_64FC1);
	cvTranspose(&_TDoGX, _TDoGY); // transpose(&DoGx) -> DoGy

	double _TMinValX, _TMaxValX, _TMinValY, _TMaxValY, _TMinValT, _TMaxValT;
	int _TKernel = 1;

	// Working images used by the Hough pipeline.
	IplImage *iplTemp = cvCreateImage(cvSize(_TChessRecognition->_Width, _TChessRecognition->_Height), IPL_DEPTH_32F, 1);
	IplImage *iplDoGx = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);
	IplImage *iplDoGy = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);
	IplImage *iplDoGyClone = cvCloneImage(iplDoGy);
	IplImage *iplDoGxClone = cvCloneImage(iplDoGx);
	IplImage *iplEdgeX = cvCreateImage(cvGetSize(iplTemp), 8, 1);
	IplImage *iplEdgeY = cvCreateImage(cvGetSize(iplTemp), 8, 1);

	CvMemStorage* _TStorageX = cvCreateMemStorage(0), *_TStorageY = cvCreateMemStorage(0);

	while (_TChessRecognition->_EnableThread != false) {
		// Grab the current frame; a mutex keeps this thread in sync with
		// the main loop.
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.lock();
		//EnterCriticalSection(&(_TChessRecognition->cs));
		cvConvert(_TChessRecognition->_ChessBoardDetectionInternalImage, iplTemp);
		//LeaveCriticalSection(&_TChessRecognition->cs);
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.unlock();

		// Apply the derivative filters to extract X- and Y-oriented lines.
		cvFilter2D(iplTemp, iplDoGx, &_TDoGX); // extract line responses.
		cvFilter2D(iplTemp, iplDoGy, _TDoGY);
		cvAbs(iplDoGx, iplDoGx);
		cvAbs(iplDoGy, iplDoGy);

		// Find the min/max of each response and normalize by the maximum.
		cvMinMaxLoc(iplDoGx, &_TMinValX, &_TMaxValX);
		cvMinMaxLoc(iplDoGy, &_TMinValY, &_TMaxValY);
		cvMinMaxLoc(iplTemp, &_TMinValT, &_TMaxValT);
		cvScale(iplDoGx, iplDoGx, 2.0 / _TMaxValX); // normalization.
		cvScale(iplDoGy, iplDoGy, 2.0 / _TMaxValY);
		cvScale(iplTemp, iplTemp, 2.0 / _TMaxValT);

		cvCopy(iplDoGy, iplDoGyClone);
		cvCopy(iplDoGx, iplDoGxClone);

		// Non-maximum suppression, then further processing.
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGx, iplDoGyClone, _TKernel);
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGy, iplDoGxClone, _TKernel);

		cvConvert(iplDoGx, iplEdgeY); // convert back to IPL_DEPTH_8U.
		cvConvert(iplDoGy, iplEdgeX);

		double rho = 1.0; // distance resolution in pixel-related units.
		double theta = 1.0; // angle resolution measured in radians.
		int threshold = 20;

		if (threshold == 0)
			threshold = 1;

		// Run the standard Hough transform on the detected edges.
		_TLineX = cvHoughLines2(iplEdgeX, _TStorageX, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);
		_TLineY = cvHoughLines2(iplEdgeY, _TStorageY, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);

		// Convert the CvSeq results into vectors (under the vector mutex).
		_TChessRecognition->_Vec_ProtectionMutex.lock();
		_TChessRecognition->_HoughLineBased->CastSequence(_TLineX, _TLineY);
		_TChessRecognition->_Vec_ProtectionMutex.unlock();

		// NOTE(review): Sleep() is Windows-only — confirm a POSIX
		// equivalent is provided when POSIX_SYS is defined.
		Sleep(2);
	}

	// Release the DoG kernel matrix.
	cvReleaseMat(&_TDoGY);

	// Release the intermediate images used by the computation.
	cvReleaseImage(&iplTemp);
	cvReleaseImage(&iplDoGx);
	cvReleaseImage(&iplDoGy);
	cvReleaseImage(&iplDoGyClone);
	cvReleaseImage(&iplDoGxClone);
	cvReleaseImage(&iplEdgeX);
	cvReleaseImage(&iplEdgeY);

	// Release the OpenCV memory storages used by cvHoughLines2.
	cvReleaseMemStorage(&_TStorageX);
	cvReleaseMemStorage(&_TStorageY);

	delete _TChessRecognition->_HoughLineBased;
#if defined(WINDOWS_SYS)
	_endthread();
#elif defined(POSIX_SYS)

#endif
	_TChessRecognition->_EndThread = true;
	return 0;
}
// Display the samples stored in a .vec training-sample file one by one in a
// highgui window ("Sample"), optionally resized by 'scale'.  If the given
// winwidth/winheight do not match the stored vector size, a near-square
// geometry is guessed from the vector length.  ESC aborts the preview; any
// other key advances to the next sample.
void cvShowVecSamples( const char* filename, int winwidth, int winheight,
                       double scale )
{
    CvVecFile file;
    short tmp;
    int i;
    CvMat* sample;

    tmp = 0;
    file.input = fopen( filename, "rb" );

    if( file.input != NULL )
    {
        // .vec header: int count, int vecsize, then two reserved shorts.
        // NOTE(review): fread() results are unchecked and the header is read
        // with host byte order — confirm that is acceptable for this tool.
        fread( &file.count, sizeof( file.count ), 1, file.input );
        fread( &file.vecsize, sizeof( file.vecsize ), 1, file.input );
        fread( &tmp, sizeof( tmp ), 1, file.input );
        fread( &tmp, sizeof( tmp ), 1, file.input );

        if( file.vecsize != winwidth * winheight )
        {
            int guessed_w = 0;
            int guessed_h = 0;

            fprintf( stderr, "Warning: specified sample width=%d and height=%d "
                "does not correspond to .vec file vector size=%d.\n",
                winwidth, winheight, file.vecsize );
            // Guess a geometry whose product is exactly vecsize, starting
            // from the integer square root of the vector length.
            if( file.vecsize > 0 )
            {
                guessed_w = cvFloor( sqrt( (float) file.vecsize ) );
                if( guessed_w > 0 )
                {
                    guessed_h = file.vecsize / guessed_w;
                }
            }

            if( guessed_w <= 0 || guessed_h <= 0 || guessed_w * guessed_h !=
                file.vecsize)
            {
                fprintf( stderr, "Error: failed to guess sample width and height\n" );
                fclose( file.input );
                return;
            }
            else
            {
                winwidth = guessed_w;
                winheight = guessed_h;
                fprintf( stderr, "Guessed width=%d, guessed height=%d\n",
                    winwidth, winheight );
            }
        }

        if( !feof( file.input ) && scale > 0 )
        {
            CvMat* scaled_sample = 0;

            file.last = 0;
            file.vector = (short*) cvAlloc( sizeof( *file.vector ) * file.vecsize );
            // 'sample' holds the raw 8-bit sample; 'scaled_sample' aliases
            // it unless a different display scale was requested.
            sample = scaled_sample = cvCreateMat( winheight, winwidth, CV_8UC1 );
            if( scale != 1.0 )
            {
                scaled_sample = cvCreateMat( MAX( 1, cvCeil( scale * winheight ) ),
                                             MAX( 1, cvCeil( scale * winwidth ) ),
                                             CV_8UC1 );
            }
            cvNamedWindow( "Sample", CV_WINDOW_AUTOSIZE );
            for( i = 0; i < file.count; i++ )
            {
                // Decode the next stored vector into 'sample'.
                icvGetHaarTraininDataFromVecCallback( sample, &file );
                if( scale != 1.0 )
                    cvResize( sample, scaled_sample, CV_INTER_LINEAR);
                cvShowImage( "Sample", scaled_sample );
                if( cvWaitKey( 0 ) == 27 )  // ESC stops the preview
                    break;
            }
            // Release the scaled copy only when it is a distinct matrix.
            if( scaled_sample && scaled_sample != sample )
                cvReleaseMat( &scaled_sample );
            cvReleaseMat( &sample );
            cvFree( &file.vector );
        }
        fclose( file.input );
    }
}
void reconstructSurface( const char* dirName, slParams* sl_params, slCalib* sl_calib) { IplImage** proj_gray_codes = NULL; int gray_ncols, gray_nrows; int gray_colshift, gray_rowshift; generateGrayCodes(sl_params->proj_w, sl_params->proj_h, proj_gray_codes, gray_ncols, gray_nrows, gray_colshift, gray_rowshift, sl_params->scan_cols, sl_params->scan_rows); IplImage **cam_gray_codes = new IplImage*[22]; int numImages = getLatestImages(dirName, cam_gray_codes, 22); IplImage* gray_decoded_cols = cvCreateImage(cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_16U, 1); IplImage* gray_decoded_rows = cvCreateImage(cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_16U, 1); IplImage* gray_mask = cvCreateImage(cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_8U, 1); decodeGrayCodes(sl_params->proj_w, sl_params->proj_h, cam_gray_codes, gray_decoded_cols, gray_decoded_rows, gray_mask, gray_ncols, gray_nrows, gray_colshift, gray_rowshift, sl_params->thresh); char str[1024], outputDir[1024]; mkdir(sl_params->outdir, 0755); std::string baseNameBuilder(dirName); size_t last_slash_position = baseNameBuilder.find_last_of("/"); baseNameBuilder = baseNameBuilder.substr(last_slash_position+1); const char* baseName = baseNameBuilder.c_str(); //sprintf(outputDir, "3D/%s", baseName); sprintf(outputDir, "%s/%s", sl_params->outdir, baseName); //mkdir("3D", 0755); mkdir(outputDir, 0755); // Display and save the correspondences. if(sl_params->display) displayDecodingResults(gray_decoded_cols, gray_decoded_rows, gray_mask, sl_params); // Reconstruct the point cloud and depth map. 
//printf("Reconstructing the point cloud and the depth map...\n"); CvMat *points = cvCreateMat(3, sl_params->cam_h*sl_params->cam_w, CV_32FC1); CvMat *colors = cvCreateMat(3, sl_params->cam_h*sl_params->cam_w, CV_32FC1); CvMat *depth_map = cvCreateMat(sl_params->cam_h, sl_params->cam_w, CV_32FC1); CvMat *mask = cvCreateMat(1, sl_params->cam_h*sl_params->cam_w, CV_32FC1); CvMat *resampled_points = cvCreateMat(3, sl_params->cam_h*sl_params->cam_w, CV_32FC1); reconstructStructuredLight(sl_params, sl_calib, cam_gray_codes[0], gray_decoded_cols, gray_decoded_rows, gray_mask, points, colors, depth_map, mask); // cvSave("points.xml",points); CvMat *points_trans = cvCreateMat(sl_params->cam_h*sl_params->cam_w, 3, CV_32FC1); cvTranspose(points, points_trans); downsamplePoints(sl_params, sl_calib, points_trans, mask, resampled_points, depth_map); double min_val, max_val; cvMinMaxLoc(depth_map, &min_val, &max_val); // Display and save the depth map. if(sl_params->display) displayDepthMap(depth_map, gray_mask, sl_params); //printf("Saving the depth map...\n"); IplImage* depth_map_image = cvCreateImage(cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_8U, 1); for(int r=0; r<sl_params->cam_h; r++){ for(int c=0; c<sl_params->cam_w; c++){ char* depth_map_image_data = (char*)(depth_map_image->imageData + r*depth_map_image->widthStep); if(mask->data.fl[sl_params->cam_w*r+c]) depth_map_image_data[c] = 255-int(255*(depth_map->data.fl[sl_params->cam_w*r+c]-sl_params->dist_range[0])/ (sl_params->dist_range[1]-sl_params->dist_range[0])); else depth_map_image_data[c] = 0; } } CvMat* dist_range = cvCreateMat(1, 2, CV_32FC1); cvmSet(dist_range, 0, 0, sl_params->dist_range[0]); cvmSet(dist_range, 0, 1, sl_params->dist_range[1]); sprintf(str, "%s/depth_map.png", outputDir); printf("%s\n",str); cvSaveImage(str, depth_map_image); sprintf(str, "%s/depth_map_range.xml", outputDir); cvSave(str, dist_range); cvReleaseImage(&depth_map_image); cvReleaseMat(&dist_range); // Save the texture map. 
//printf("Saving the texture map...\n"); sprintf(str, "%s/%s.png", outputDir, baseName); cvSaveImage(str, cam_gray_codes[0]); // Save the point cloud. //printf("Saving the point cloud...\n"); sprintf(str, "%s/%s.ply", outputDir, baseName); //if(savePointsPLY(str, resampled_points, NULL, NULL, mask, sl_params->proj_w, sl_params->proj_h)){ //if(savePointsPLY(str, resampled_points, NULL, NULL, mask, sl_params->cam_w, sl_params->cam_h)){ if(savePointsPLY(str, points, NULL, NULL, mask, sl_params->cam_w, sl_params->cam_h)){ fprintf(stderr, "Saving the reconstructed point cloud failed!\n"); return; } sprintf(str,"%s/proj_intrinsic.xml", outputDir); cvSave(str, sl_calib->proj_intrinsic); sprintf(str,"%s/proj_distortion.xml", outputDir); cvSave(str, sl_calib->proj_distortion); sprintf(str,"%s/cam_intrinsic.xml", outputDir); cvSave(str, sl_calib->cam_intrinsic); sprintf(str,"%s/cam_distortion.xml", outputDir); cvSave(str, sl_calib->cam_distortion); sprintf(str, "%s/cam_extrinsic.xml", outputDir); cvSave(str, sl_calib->cam_extrinsic); sprintf(str, "%s/proj_extrinsic.xml", outputDir); cvSave(str, sl_calib->proj_extrinsic); // Free allocated resources. cvReleaseImage(&gray_decoded_cols); cvReleaseImage(&gray_decoded_rows); cvReleaseImage(&gray_mask); cvReleaseMat(&points); cvReleaseMat(&colors); cvReleaseMat(&depth_map); cvReleaseMat(&mask); cvReleaseMat(&resampled_points); for(int i=0; i<(gray_ncols+gray_nrows+1); i++) cvReleaseImage(&proj_gray_codes[i]); delete[] proj_gray_codes; for(int i=0; i<2*(gray_ncols+gray_nrows+1); i++) cvReleaseImage(&cam_gray_codes[i]); delete[] cam_gray_codes; return; }
// Decode an image held in an in-memory buffer.  Mirrors imread_ (see above):
// 'hdrtype' selects the kind of header returned (CvMat*, IplImage*, or a
// cv::Mat via 'mat'); returns 0 on failure.  Decoders that cannot read
// directly from memory are fed through a temporary file, which is removed
// again afterwards.
static void*
imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 )
{
    CV_Assert(buf.data && buf.isContinuous());
    IplImage* image = 0;
    CvMat *matrix = 0;
    Mat temp, *data = &temp;
    String filename;

    ImageDecoder decoder = findDecoder(buf);
    if( !decoder )
        return 0;

    // Fallback path: spill the buffer to a temporary file for decoders
    // that only accept a filename source.
    if( !decoder->setSource(buf) )
    {
        filename = tempfile();
        FILE* f = fopen( filename.c_str(), "wb" );
        if( !f )
            return 0;
        size_t bufSize = buf.cols*buf.rows*buf.elemSize();
        fwrite( &buf.data[0], 1, bufSize, f );
        fclose(f);
        decoder->setSource(filename);
    }

    if( !decoder->readHeader() )
    {
        // Header parsing failed: clean up the temp file, if any.
        if( !filename.empty() )
            remove(filename.c_str());
        return 0;
    }

    CvSize size;
    size.width = decoder->width();
    size.height = decoder->height();

    int type = decoder->type();
    // flags == -1 means "load as-is"; otherwise adjust depth and channel
    // count per the CV_LOAD_IMAGE_* flags.
    if( flags != -1 )
    {
        if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 )
            type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));

        if( (flags & CV_LOAD_IMAGE_COLOR) != 0 ||
           ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) )
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);
        else
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);
    }

    // Allocate output storage matching the requested header type; 'data'
    // always ends up viewing that storage.
    if( hdrtype == LOAD_CVMAT || hdrtype == LOAD_MAT )
    {
        if( hdrtype == LOAD_CVMAT )
        {
            matrix = cvCreateMat( size.height, size.width, type );
            temp = cvarrToMat(matrix);
        }
        else
        {
            mat->create( size.height, size.width, type );
            data = mat;
        }
    }
    else
    {
        image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );
        temp = cvarrToMat(image);
    }

    bool code = decoder->readData( *data );
    // The temp file (if any) is no longer needed once decoding finished.
    if( !filename.empty() )
        remove(filename.c_str());

    if( !code )
    {
        // Decoding failed: release whichever header was allocated.
        cvReleaseImage( &image );
        cvReleaseMat( &matrix );
        if( mat )
            mat->release();
        return 0;
    }

    return hdrtype == LOAD_CVMAT ? (void*)matrix :
        hdrtype == LOAD_IMAGE ? (void*)image : (void*)mat;
}
bool loadSLConfigXML(slParams *sl_params, slCalib *sl_calib, const char *configFile = CONFIG_FILE, const char *outputDir = NULL) { // Read structured lighting parameters from configuration file. FILE* pFile = fopen(configFile, "r"); if(pFile != NULL){ fclose(pFile); fprintf(stderr, "Reading configuration file...\n"); readConfiguration(configFile, sl_params); } else{ return false; } // Allocate storage for calibration parameters. int cam_nelems = sl_params->cam_w*sl_params->cam_h; int proj_nelems = sl_params->proj_w*sl_params->proj_h; sl_calib->cam_intrinsic_calib = false; sl_calib->proj_intrinsic_calib = false; sl_calib->procam_extrinsic_calib= false; sl_calib->cam_intrinsic = cvCreateMat(3,3,CV_32FC1); sl_calib->cam_distortion = cvCreateMat(5,1,CV_32FC1); sl_calib->cam_extrinsic = cvCreateMat(2, 3, CV_32FC1); sl_calib->proj_intrinsic = cvCreateMat(3, 3, CV_32FC1); sl_calib->proj_distortion = cvCreateMat(5, 1, CV_32FC1); sl_calib->proj_extrinsic = cvCreateMat(2, 3, CV_32FC1); sl_calib->cam_center = cvCreateMat(3, 1, CV_32FC1); sl_calib->proj_center = cvCreateMat(3, 1, CV_32FC1); sl_calib->cam_rays = cvCreateMat(3, cam_nelems, CV_32FC1); sl_calib->proj_rays = cvCreateMat(3, proj_nelems, CV_32FC1); sl_calib->proj_column_planes = cvCreateMat(sl_params->proj_w, 4, CV_32FC1); sl_calib->proj_row_planes = cvCreateMat(sl_params->proj_h, 4, CV_32FC1); if(outputDir) strcpy(sl_params->outdir, outputDir); // Load intrinsic camera calibration parameters (if found). 
char str1[1024], str2[1024]; sprintf(str1, "%s/calib/cam/cam_intrinsic.xml", sl_params->outdir); sprintf(str2, "%s/calib/cam/cam_distortion.xml", sl_params->outdir); if( ((CvMat*)cvLoad(str1) != 0) && ((CvMat*)cvLoad(str2) != 0) ){ sl_calib->cam_intrinsic = (CvMat*)cvLoad(str1); sl_calib->cam_distortion = (CvMat*)cvLoad(str2); sl_calib->cam_intrinsic_calib = true; fprintf(stderr, "Loaded previous intrinsic camera calibration.\n"); } else { fprintf(stderr, "WARNING: previous intrinsic calibration NOT loaded.\n"); } // Load intrinsic projector calibration parameters (if found); sprintf(str1, "%s/calib/proj/proj_intrinsic.xml", sl_params->outdir); sprintf(str2, "%s/calib/proj/proj_distortion.xml", sl_params->outdir); if( ((CvMat*)cvLoad(str1) != 0) && ((CvMat*)cvLoad(str2) != 0) ){ sl_calib->proj_intrinsic = (CvMat*)cvLoad(str1); sl_calib->proj_distortion = (CvMat*)cvLoad(str2); sl_calib->proj_intrinsic_calib = true; fprintf(stderr, "Loaded previous intrinsic projector calibration.\n"); } else { fprintf(stderr, "WARNING: previous intrinsic projector calibration NOT loaded.\n"); } // Load extrinsic projector-camera parameters (if found). sprintf(str1, "%s/calib/proj/cam_extrinsic.xml", sl_params->outdir); sprintf(str2, "%s/calib/proj/proj_extrinsic.xml", sl_params->outdir); if( (sl_calib->cam_intrinsic_calib && sl_calib->proj_intrinsic_calib) && ( ((CvMat*)cvLoad(str1) != 0) && ((CvMat*)cvLoad(str2) != 0) ) ){ sl_calib->cam_extrinsic = (CvMat*)cvLoad(str1); sl_calib->proj_extrinsic = (CvMat*)cvLoad(str2); sl_calib->procam_extrinsic_calib = true; evaluateProCamGeometry(sl_params, sl_calib); fprintf(stderr, "Loaded previous extrinsic projector-camera calibration.\n"); } else { fprintf(stderr, "WARNING: previous extrinsic projector-camera calibration NOT loaded.\n"); } // Initialize background model. 
sl_calib->background_depth_map = cvCreateMat( sl_params->cam_h, sl_params->cam_w, CV_32FC1); sl_calib->background_image = cvCreateImage( cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_8U, 3); sl_calib->background_mask = cvCreateImage( cvSize(sl_params->cam_w, sl_params->cam_h), IPL_DEPTH_8U, 1); cvSet(sl_calib->background_depth_map, cvScalar(FLT_MAX)); cvZero(sl_calib->background_image); cvSet(sl_calib->background_mask, cvScalar(255)); return true; }
// Prepare one test case for the histogram-query test: pick 'iters' random
// multi-dimensional bin indices (stored flat in 'indices'), and fill
// 'values0' with the expected query results — lin_idx+1 for bins that get
// initialized, 'default_value' for bins intentionally left untouched.
// A per-bin bit mask keeps 'values0' consistent when the same bin is drawn
// more than once.
int CV_QueryHistTest::prepare_test_case( int test_case_idx )
{
    int code = CV_BaseHistTest::prepare_test_case( test_case_idx );

    if( code > 0 )
    {
        int i, j, iters;
        float default_value = 0.f;
        CvRNG* rng = ts->get_rng();
        CvMat* bit_mask = 0;
        int* idx;

        // Number of queries: random, capped at ~90% of the histogram size.
        iters = (cvTsRandInt(rng) % MAX(total_size/10,100)) + 1;
        iters = MIN( iters, total_size*9/10 + 1 );

        indices = cvCreateMat( 1, iters*cdims, CV_32S );
        values = cvCreateMat( 1, iters, CV_32F );
        values0 = cvCreateMat( 1, iters, CV_32F );
        idx = indices->data.i;

        //printf( "total_size = %d, cdims = %d, iters = %d\n", total_size, cdims, iters );

        // One bit per histogram bin, marking bins that received a value.
        bit_mask = cvCreateMat( 1, (total_size + 7)/8, CV_8U );
        cvZero( bit_mask );

#define GET_BIT(n) (bit_mask->data.ptr[(n)/8] & (1 << ((n)&7)))
#define SET_BIT(n) bit_mask->data.ptr[(n)/8] |= (1 << ((n)&7))

        // set random histogram bins' values to the linear indices of the bins
        for( i = 0; i < iters; i++ )
        {
            int lin_idx = 0;
            for( j = 0; j < cdims; j++ )
            {
                int t = cvTsRandInt(rng) % dims[j];
                idx[i*cdims + j] = t;
                lin_idx = lin_idx*dims[j] + t;  // row-major linearization
            }

            // ~7/8 of the drawn bins get a real value; a bin already set in
            // an earlier draw keeps its value.
            if( cvTsRandInt(rng) % 8 || GET_BIT(lin_idx) )
            {
                values0->data.fl[i] = (float)(lin_idx+1);
                SET_BIT(lin_idx);
            }
            else
                // some histogram bins will not be initialized intentionally,
                // they should be equal to the default value
                values0->data.fl[i] = default_value;
        }

        // do the second pass to make values0 consistent with bit_mask
        for( i = 0; i < iters; i++ )
        {
            int lin_idx = 0;
            for( j = 0; j < cdims; j++ )
                lin_idx = lin_idx*dims[j] + idx[i*cdims + j];

            if( GET_BIT(lin_idx) )
                values0->data.fl[i] = (float)(lin_idx+1);
        }

        cvReleaseMat( &bit_mask );
    }

    return code;
}
// Train a gradient-boosted trees model.
//
// _train_data  : feature matrix, (n x m) for CV_ROW_SAMPLE, else (m x n).
// _responses   : CV_32FC1 or CV_32SC1 response vector (one per sample).
// _var_idx/_sample_idx/_var_type/_missing_mask : optional masks (may be 0).
// _params      : boosting parameters (weak count, shrinkage, subsampling).
// final bool   : 'update' is not supported and ignored.
// Returns true on success; reports format errors via CV_Error.
bool
CvGBTrees::train( const CvMat* _train_data, int _tflag,
              const CvMat* _responses, const CvMat* _var_idx,
              const CvMat* _sample_idx, const CvMat* _var_type,
              const CvMat* _missing_mask,
              CvGBTreesParams _params, bool /*_update*/ ) //update is not supported
{
    CvMemStorage* storage = 0;

    params = _params;
    bool is_regression = problem_type();

    clear();
    /*
      n - count of samples
      m - count of variables
    */
    int n = _train_data->rows;
    int m = _train_data->cols;
    // For column-sample layout, swap so n is always the sample count.
    if (_tflag != CV_ROW_SAMPLE)
    {
        int tmp;
        CV_SWAP(n,m,tmp);
    }

    // Working responses (the per-iteration gradient targets).
    CvMat* new_responses = cvCreateMat( n, 1, CV_32F);
    cvZero(new_responses);

    data = new CvDTreeTrainData( _train_data, _tflag, new_responses, _var_idx,
        _sample_idx, _var_type, _missing_mask, _params, true, true );
    if (_missing_mask)
    {
        // Keep a private copy of the missing-value mask.
        missing = cvCreateMat(_missing_mask->rows, _missing_mask->cols,
                              _missing_mask->type);
        cvCopy( _missing_mask, missing);
    }

    // Copy the original responses into a float row vector, handling both
    // row- and column-shaped inputs via 'step'.
    orig_response = cvCreateMat( 1, n, CV_32F );
    int step = (_responses->cols > _responses->rows) ? 1 : _responses->step / CV_ELEM_SIZE(_responses->type);
    switch (CV_MAT_TYPE(_responses->type))
    {
        case CV_32FC1:
        {
            for (int i=0; i<n; ++i)
                orig_response->data.fl[i] = _responses->data.fl[i*step];
        }; break;
        case CV_32SC1:
        {
            for (int i=0; i<n; ++i)
                orig_response->data.fl[i] = (float) _responses->data.i[i*step];
        }; break;
        default:
            CV_Error(CV_StsUnmatchedFormats, "Response should be a 32fC1 or 32sC1 vector.");
    }

    // Classification: enumerate the distinct class labels.
    if (!is_regression)
    {
        class_count = 0;
        unsigned char * mask = new unsigned char[n];
        memset(mask, 0, n);
        // compute the count of different output classes
        for (int i=0; i<n; ++i)
            if (!mask[i])
            {
                class_count++;
                for (int j=i; j<n; ++j)
                    if (int(orig_response->data.fl[j]) == int(orig_response->data.fl[i]))
                        mask[j] = 1;
            }
        delete[] mask;

        // Collect the class_count distinct labels in order of appearance.
        class_labels = cvCreateMat(1, class_count, CV_32S);
        class_labels->data.i[0] = int(orig_response->data.fl[0]);
        int j = 1;
        for (int i=1; i<n; ++i)
        {
            int k = 0;
            while ((int(orig_response->data.fl[i]) - class_labels->data.i[k]) && (k<j))
                k++;
            if (k == j)
            {
                class_labels->data.i[k] = int(orig_response->data.fl[i]);
                j++;
            }
        }
    }

    // inside gbt learning proccess only regression decision trees are built
    data->is_classifier = false;

    // preproccessing sample indices
    if (_sample_idx)
    {
        int sample_idx_len = get_len(_sample_idx);

        switch (CV_MAT_TYPE(_sample_idx->type))
        {
            // Explicit index vector: copy as-is.
            case CV_32SC1:
            {
                sample_idx = cvCreateMat( 1, sample_idx_len, CV_32S );
                for (int i=0; i<sample_idx_len; ++i)
                    sample_idx->data.i[i] = _sample_idx->data.i[i];
            } break;
            // 8-bit mask: gather the indices of the non-zero entries.
            case CV_8S:
            case CV_8U:
            {
                int active_samples_count = 0;
                for (int i=0; i<sample_idx_len; ++i)
                    active_samples_count += int( _sample_idx->data.ptr[i] );
                sample_idx = cvCreateMat( 1, active_samples_count, CV_32S );
                active_samples_count = 0;
                for (int i=0; i<sample_idx_len; ++i)
                    if (int( _sample_idx->data.ptr[i] ))
                        sample_idx->data.i[active_samples_count++] = i;
            } break;
            default: CV_Error(CV_StsUnmatchedFormats, "_sample_idx should be a 32sC1, 8sC1 or 8uC1 vector.");
        }
        // NOTE(review): this sorts the CV_32S index data through the .fl
        // (float) alias, and uses sample_idx_len even in the 8u/8s mask
        // case where sample_idx only holds active_samples_count elements —
        // a likely out-of-bounds read/write.  Confirm against
        // icvSortFloat's contract before changing.
        icvSortFloat(sample_idx->data.fl, sample_idx_len, 0);
    }
    else
    {
        // No index vector given: use all n samples.
        sample_idx = cvCreateMat( 1, n, CV_32S );
        for (int i=0; i<n; ++i)
            sample_idx->data.i[i] = i;
    }

    // Cumulative model responses (double-buffered: tmp is swapped in after
    // each boosting iteration).
    sum_response = cvCreateMat(class_count, n, CV_32F);
    sum_response_tmp = cvCreateMat(class_count, n, CV_32F);
    cvZero(sum_response);

    delta = 0.0f;
    /*
      in the case of a regression problem the initial guess (the zero term
      in the sum) is set to the mean of all the training responses, that is
      the best constant model
    */
    if (is_regression) base_value = find_optimal_value(sample_idx);
    /*
      in the case of a classification problem the initial guess (the zero term
      in the sum) is set to zero for all the trees sequences
    */
    else base_value = 0.0f;
    /*
      current predicition on all training samples is set to be
      equal to the base_value
    */
    cvSet( sum_response, cvScalar(base_value) );

    // One tree sequence per class; each sequence owns its memory storage.
    weak = new pCvSeq[class_count];
    for (int i=0; i<class_count; ++i)
    {
        storage = cvCreateMemStorage();
        weak[i] = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvDTree*), storage );
        storage = 0;
    }

    // subsample params and data
    rng = &cv::theRNG();

    int samples_count = get_len(sample_idx);

    // Clamp degenerate subsample portions to 1 (use all samples).
    params.subsample_portion = params.subsample_portion <= FLT_EPSILON ||
        1 - params.subsample_portion <= FLT_EPSILON
        ? 1 : params.subsample_portion;
    int train_sample_count = cvFloor(params.subsample_portion * samples_count);
    if (train_sample_count == 0)
        train_sample_count = samples_count;
    int test_sample_count = samples_count - train_sample_count;
    // subsample_train/subsample_test are views over idx_data (filled by
    // do_subsample() each iteration).
    int* idx_data = new int[samples_count];
    subsample_train = cvCreateMatHeader( 1, train_sample_count, CV_32SC1 );
    *subsample_train = cvMat( 1, train_sample_count, CV_32SC1, idx_data );
    if (test_sample_count)
    {
        subsample_test  = cvCreateMatHeader( 1, test_sample_count, CV_32SC1 );
        *subsample_test = cvMat( 1, test_sample_count, CV_32SC1,
                         idx_data + train_sample_count );
    }

    // training procedure
    for ( int i=0; i < params.weak_count; ++i )
    {
        do_subsample();
        for ( int k=0; k < class_count; ++k )
        {
            // Fit one regression tree to the current gradient of class k.
            find_gradient(k);
            CvDTree* tree = new CvDTree;
            tree->train( data, subsample_train );
            change_values(tree, k);

            // Update the held-out predictions (train part was updated
            // inside change_values()).
            if (subsample_test)
            {
                CvMat x;
                CvMat x_miss;
                int* sample_data = sample_idx->data.i;
                int* subsample_data = subsample_test->data.i;
                int s_step = (sample_idx->cols > sample_idx->rows) ? 1
                             : sample_idx->step/CV_ELEM_SIZE(sample_idx->type);
                for (int j=0; j<get_len(subsample_test); ++j)
                {
                    int idx = *(sample_data + subsample_data[j]*s_step);
                    float res = 0.0f;
                    if (_tflag == CV_ROW_SAMPLE)
                        cvGetRow( data->train_data, &x, idx);
                    else
                        cvGetCol( data->train_data, &x, idx);

                    if (missing)
                    {
                        if (_tflag == CV_ROW_SAMPLE)
                            cvGetRow( missing, &x_miss, idx);
                        else
                            cvGetCol( missing, &x_miss, idx);

                        res = (float)tree->predict(&x, &x_miss)->value;
                    }
                    else
                    {
                        res = (float)tree->predict(&x)->value;
                    }
                    // Shrinkage-weighted additive update.
                    sum_response_tmp->data.fl[idx + k*n] =
                        sum_response->data.fl[idx + k*n] +
                        params.shrinkage * res;
                }
            }

            cvSeqPush( weak[k], &tree );
            tree = 0;
        } // k=0..class_count

        // Swap the double-buffered response matrices.
        CvMat* tmp;
        tmp = sum_response_tmp;
        sum_response_tmp = sum_response;
        sum_response = tmp;
        tmp = 0;
    } // i=0..params.weak_count

    delete[] idx_data;
    cvReleaseMat(&new_responses);
    data->free_train_data();

    return true;

} // CvGBTrees::train(...)