void CvEM::kmeans( const CvVectors& train_data, int nclusters, CvMat* labels, CvTermCriteria termcrit, const CvMat* centers0 ) { CvMat* centers = 0; CvMat* old_centers = 0; CvMat* counters = 0; CV_FUNCNAME( "CvEM::kmeans" ); __BEGIN__; CvRNG rng = cvRNG(-1); int i, j, k, nsamples, dims; int iter = 0; double max_dist = DBL_MAX; termcrit = cvCheckTermCriteria( termcrit, 1e-6, 100 ); termcrit.epsilon *= termcrit.epsilon; nsamples = train_data.count; dims = train_data.dims; nclusters = MIN( nclusters, nsamples ); CV_CALL( centers = cvCreateMat( nclusters, dims, CV_64FC1 )); CV_CALL( old_centers = cvCreateMat( nclusters, dims, CV_64FC1 )); CV_CALL( counters = cvCreateMat( 1, nclusters, CV_32SC1 )); cvZero( old_centers ); if( centers0 ) { CV_CALL( cvConvert( centers0, centers )); } else { for( i = 0; i < nsamples; i++ ) labels->data.i[i] = i*nclusters/nsamples; cvRandShuffle( labels, &rng ); } for( ;; ) { CvMat* temp; if( iter > 0 || centers0 ) { for( i = 0; i < nsamples; i++ ) { const float* s = train_data.data.fl[i]; int k_best = 0; double min_dist = DBL_MAX; for( k = 0; k < nclusters; k++ ) { const double* c = (double*)(centers->data.ptr + k*centers->step); double dist = 0; for( j = 0; j <= dims - 4; j += 4 ) { double t0 = c[j] - s[j]; double t1 = c[j+1] - s[j+1]; dist += t0*t0 + t1*t1; t0 = c[j+2] - s[j+2]; t1 = c[j+3] - s[j+3]; dist += t0*t0 + t1*t1; } for( ; j < dims; j++ ) { double t = c[j] - s[j]; dist += t*t; } if( min_dist > dist ) { min_dist = dist; k_best = k; } } labels->data.i[i] = k_best; } } if( ++iter > termcrit.max_iter ) break; CV_SWAP( centers, old_centers, temp ); cvZero( centers ); cvZero( counters ); // update centers for( i = 0; i < nsamples; i++ ) { const float* s = train_data.data.fl[i]; k = labels->data.i[i]; double* c = (double*)(centers->data.ptr + k*centers->step); for( j = 0; j <= dims - 4; j += 4 ) { double t0 = c[j] + s[j]; double t1 = c[j+1] + s[j+1]; c[j] = t0; c[j+1] = t1; t0 = c[j+2] + s[j+2]; t1 = c[j+3] + s[j+3]; c[j+2] = t0; c[j+3] 
= t1; } for( ; j < dims; j++ ) c[j] += s[j]; counters->data.i[k]++; } if( iter > 1 ) max_dist = 0; for( k = 0; k < nclusters; k++ ) { double* c = (double*)(centers->data.ptr + k*centers->step); if( counters->data.i[k] != 0 ) { double scale = 1./counters->data.i[k]; for( j = 0; j < dims; j++ ) c[j] *= scale; } else { const float* s; for( j = 0; j < 10; j++ ) { i = cvRandInt( &rng ) % nsamples; if( counters->data.i[labels->data.i[i]] > 1 ) break; } s = train_data.data.fl[i]; for( j = 0; j < dims; j++ ) c[j] = s[j]; } if( iter > 1 ) { double dist = 0; const double* c_o = (double*)(old_centers->data.ptr + k*old_centers->step); for( j = 0; j < dims; j++ ) { double t = c[j] - c_o[j]; dist += t*t; } if( max_dist < dist ) max_dist = dist; } } if( max_dist < termcrit.epsilon ) break; } cvZero( counters ); for( i = 0; i < nsamples; i++ ) counters->data.i[labels->data.i[i]]++; // ensure that we do not have empty clusters for( k = 0; k < nclusters; k++ ) if( counters->data.i[k] == 0 ) for(;;) { i = cvRandInt(&rng) % nsamples; j = labels->data.i[i]; if( counters->data.i[j] > 1 ) { labels->data.i[i] = k; counters->data.i[j]--; counters->data.i[k]++; break; } } __END__; cvReleaseMat( ¢ers ); cvReleaseMat( &old_centers ); cvReleaseMat( &counters ); }
void CvEM::set_params( const CvEMParams& _params, const CvVectors& train_data ) { CV_FUNCNAME( "CvEM::set_params" ); __BEGIN__; int k; params = _params; params.term_crit = cvCheckTermCriteria( params.term_crit, 1e-6, 10000 ); if( params.cov_mat_type != COV_MAT_SPHERICAL && params.cov_mat_type != COV_MAT_DIAGONAL && params.cov_mat_type != COV_MAT_GENERIC ) CV_ERROR( CV_StsBadArg, "Unknown covariation matrix type" ); switch( params.start_step ) { case START_M_STEP: if( !params.probs ) CV_ERROR( CV_StsNullPtr, "Probabilities must be specified when EM algorithm starts with M-step" ); break; case START_E_STEP: if( !params.means ) CV_ERROR( CV_StsNullPtr, "Mean's must be specified when EM algorithm starts with E-step" ); break; case START_AUTO_STEP: break; default: CV_ERROR( CV_StsBadArg, "Unknown start_step" ); } if( params.nclusters < 1 ) CV_ERROR( CV_StsOutOfRange, "The number of clusters (mixtures) should be > 0" ); if( params.probs ) { const CvMat* p = params.weights; if( !CV_IS_MAT(p) || (CV_MAT_TYPE(p->type) != CV_32FC1 && CV_MAT_TYPE(p->type) != CV_64FC1) || p->rows != train_data.count || p->cols != params.nclusters ) CV_ERROR( CV_StsBadArg, "The array of probabilities must be a valid " "floating-point matrix (CvMat) of 'nsamples' x 'nclusters' size" ); } if( params.means ) { const CvMat* m = params.means; if( !CV_IS_MAT(m) || (CV_MAT_TYPE(m->type) != CV_32FC1 && CV_MAT_TYPE(m->type) != CV_64FC1) || m->rows != params.nclusters || m->cols != train_data.dims ) CV_ERROR( CV_StsBadArg, "The array of mean's must be a valid " "floating-point matrix (CvMat) of 'nsamples' x 'dims' size" ); } if( params.weights ) { const CvMat* w = params.weights; if( !CV_IS_MAT(w) || (CV_MAT_TYPE(w->type) != CV_32FC1 && CV_MAT_TYPE(w->type) != CV_64FC1) || (w->rows != 1 && w->cols != 1) || w->rows + w->cols - 1 != params.nclusters ) CV_ERROR( CV_StsBadArg, "The array of weights must be a valid " "1d floating-point vector (CvMat) of 'nclusters' elements" ); } if( params.covs ) for( k = 0; 
k < params.nclusters; k++ ) { const CvMat* cov = params.covs[k]; if( !CV_IS_MAT(cov) || (CV_MAT_TYPE(cov->type) != CV_32FC1 && CV_MAT_TYPE(cov->type) != CV_64FC1) || cov->rows != cov->cols || cov->cols != train_data.dims ) CV_ERROR( CV_StsBadArg, "Each of covariation matrices must be a valid square " "floating-point matrix (CvMat) of 'dims' x 'dims'" ); } __END__; }
/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name:     cvMeanShift
//    Purpose:  MeanShift algorithm: repeatedly moves the search window toward
//              the center of mass of the probability image inside it.
//    Parameters:
//      imgProb  - 2D object probability distribution (single-channel)
//      windowIn - initial search window
//      criteria - termination criteria (max iterations and/or minimal shift)
//      comp     - if non-NULL, receives the converged window and its mass (m00)
//    Returns:
//      The number of iterations performed.
//F*/
CV_IMPL int
cvMeanShift( const void* imgProb, CvRect windowIn,
             CvTermCriteria criteria, CvConnectedComp* comp )
{
    CvMoments moments;
    int iters = 0, eps;
    CvMat stub, *mat = (CvMat*)imgProb;
    CvMat cur_win;
    CvRect cur_rect = windowIn;

    CV_FUNCNAME( "cvMeanShift" );

    /* report the input window even if validation fails below */
    if( comp )
        comp->rect = windowIn;

    moments.m00 = moments.m10 = moments.m01 = 0;

    __BEGIN__;

    CV_CALL( mat = cvGetMat( mat, &stub ));

    if( CV_MAT_CN( mat->type ) > 1 )
        CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );

    if( windowIn.height <= 0 || windowIn.width <= 0 )
        CV_ERROR( CV_StsBadArg, "Input window has non-positive sizes" );

    if( windowIn.x < 0 || windowIn.x + windowIn.width > mat->cols ||
        windowIn.y < 0 || windowIn.y + windowIn.height > mat->rows )
        CV_ERROR( CV_StsBadArg, "Initial window is not inside the image ROI" );

    CV_CALL( criteria = cvCheckTermCriteria( criteria, 1., 100 ));

    /* squared so it compares directly against the squared shift */
    eps = cvRound( criteria.epsilon * criteria.epsilon );

    for( iters = 0; iters < criteria.max_iter; iters++ )
    {
        int shift_x, shift_y, new_x, new_y;
        double inv_m00;

        CV_CALL( cvGetSubRect( mat, &cur_win, cur_rect ));
        CV_CALL( cvMoments( &cur_win, &moments ));

        /* window contains (almost) no mass — nothing to track */
        if( fabs(moments.m00) < DBL_EPSILON )
            break;

        /* shift toward the center of mass of the current window */
        inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00;
        shift_x = cvRound( moments.m10 * inv_m00 - windowIn.width*0.5 );
        shift_y = cvRound( moments.m01 * inv_m00 - windowIn.height*0.5 );

        /* clamp the shifted window to the image boundary */
        new_x = cur_rect.x + shift_x;
        new_y = cur_rect.y + shift_y;

        if( new_x < 0 )
            new_x = 0;
        else if( new_x + cur_rect.width > mat->cols )
            new_x = mat->cols - cur_rect.width;

        if( new_y < 0 )
            new_y = 0;
        else if( new_y + cur_rect.height > mat->rows )
            new_y = mat->rows - cur_rect.height;

        /* effective (post-clamp) displacement */
        shift_x = new_x - cur_rect.x;
        shift_y = new_y - cur_rect.y;
        cur_rect.x = new_x;
        cur_rect.y = new_y;

        /* converged: window moved less than the requested accuracy */
        if( shift_x*shift_x + shift_y*shift_y < eps )
            break;
    }

    __END__;

    if( comp )
    {
        comp->rect = cur_rect;
        comp->area = (float)moments.m00;
    }

    return iters;
}
/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name: icvContourFromContourTree
//    Purpose:
//      reconstructs a contour from its binary tree representation
//    Context:
//    Parameters:
//      tree     - pointer to the input binary tree representation
//      storage  - pointer to the current storage block
//      contour  - pointer to output contour object.
//      criteria - criteria defining the threshold for contour reconstruction:
//                 CV_TERMCRIT_ITER bounds the tree depth visited,
//                 CV_TERMCRIT_EPS keeps only triangles whose area is at least
//                 epsilon * (root area)
//F*/
CV_IMPL CvSeq*
cvContourFromContourTree( const CvContourTree* tree,
                          CvMemStorage* storage,
                          CvTermCriteria criteria )
{
    CvSeq* contour = 0;
    _CvTrianAttr **ptr_buf = 0;     /* explicit stack of pending right subtrees */
    int *level_buf = 0;             /* depth at which each stacked vertex was pushed */
    int i_buf;                      /* stack top index */
    int lpt;                        /* number of tree vertices (stack capacity) */
    double area_all;                /* area of the root triangle */
    double threshold;               /* minimal triangle area to keep */
    int cur_level;                  /* depth of the current vertex */
    int level;                      /* maximal depth to visit (-1 = unbounded) */
    int seq_flags;
    char log_iter, log_eps;         /* which termination criteria are active */
    int out_hearder_size;
    _CvTrianAttr *tree_one = 0, tree_root;  /* current vertex / copy of the root */
    CvSeqReader reader;
    CvSeqWriter writer;

    CV_FUNCNAME("cvContourFromContourTree");

    __BEGIN__;

    if( !tree )
        CV_ERROR( CV_StsNullPtr, "" );
    if( !CV_IS_SEQ_POLYGON_TREE( tree ))
        CV_ERROR_FROM_STATUS( CV_BADFLAG_ERR );

    criteria = cvCheckTermCriteria( criteria, 0., 100 );

    lpt = tree->total;
    ptr_buf = NULL;
    level_buf = NULL;
    i_buf = 0;
    cur_level = 0;
    /* a criterion participates when set alone or combined with the other */
    log_iter = (char) (criteria.type == CV_TERMCRIT_ITER ||
                       (criteria.type == CV_TERMCRIT_ITER + CV_TERMCRIT_EPS));
    log_eps = (char) (criteria.type == CV_TERMCRIT_EPS ||
                      (criteria.type == CV_TERMCRIT_ITER + CV_TERMCRIT_EPS));

    cvStartReadSeq( (CvSeq *) tree, &reader, 0 );

    out_hearder_size = sizeof( CvContour );

    seq_flags = CV_SEQ_POLYGON;
    cvStartWriteSeq( seq_flags, out_hearder_size, sizeof( CvPoint ), storage, &writer );

    ptr_buf = (_CvTrianAttr **) cvAlloc( lpt * sizeof( _CvTrianAttr * ));
    if( ptr_buf == NULL )
        CV_ERROR_FROM_STATUS( CV_OUTOFMEM_ERR );
    if( log_iter )
    {
        /* depth bookkeeping is only needed when the depth bound is active */
        level_buf = (int *) cvAlloc( lpt * (sizeof( int )));
        if( level_buf == NULL )
            CV_ERROR_FROM_STATUS( CV_OUTOFMEM_ERR );
    }
    memset( ptr_buf, 0, lpt * sizeof( _CvTrianAttr * ));

/*  write the first tree root's point as a start point of the result contour  */
    CV_WRITE_SEQ_ELEM( tree->p1, writer );
/*  write the second tree root's point into buffer    */

/*  read the root of the tree   */
    CV_READ_SEQ_ELEM( tree_root, reader );

    tree_one = &tree_root;
    area_all = tree_one->area;

    if( log_eps )
        threshold = criteria.epsilon * area_all;
    else
        /* NOTE(review): with no epsilon criterion the threshold exceeds every
           triangle's area, so only the depth bound (or full traversal) applies
           — presumably intentional; confirm against the original algorithm */
        threshold = 10 * area_all;

    if( log_iter )
        level = criteria.max_iter;
    else
        level = -1;

/*  contour from binary tree constraction  */
    while( i_buf >= 0 )
    {
        if( tree_one != NULL && (cur_level <= level || tree_one->area >= threshold) )
        /*  go to left sub tree for the vertex and save pointer to the right vertex */
        /*  into the buffer  */
        {
            ptr_buf[i_buf] = tree_one;
            if( log_iter )
            {
                level_buf[i_buf] = cur_level;
                cur_level++;
            }
            i_buf++;
            tree_one = tree_one->next_v1;
        }
        else
        {
            /* left branch exhausted (or vertex rejected): pop the stack,
               emit the popped vertex's point and descend its right subtree */
            i_buf--;
            if( i_buf >= 0 )
            {
                CvPoint pt = ptr_buf[i_buf]->pt;
                CV_WRITE_SEQ_ELEM( pt, writer );
                tree_one = ptr_buf[i_buf]->next_v2;
                if( log_iter )
                {
                    cur_level = level_buf[i_buf] + 1;
                }
            }
        }
    }

    contour = cvEndWriteSeq( &writer );
    cvBoundingRect( contour, 1 );

    __CLEANUP__;
    __END__;

    /* cvFree tolerates NULL, so this is safe on the error paths too */
    cvFree( &level_buf );
    cvFree( &ptr_buf );

    return contour;
}
/* Standard k-means clustering (Lloyd's algorithm).
   samples_arr - CV_32F matrix, one sample per row
   cluster_count - requested number of clusters (clipped to the sample count)
   labels_arr  - in/out CV_32SC1 1D vector, one label per sample row
   termcrit    - iteration count and/or center-shift epsilon stopping criteria
   On return every cluster owns at least one sample. */
CV_IMPL void
cvKMeans2( const CvArr* samples_arr, int cluster_count,
           CvArr* labels_arr, CvTermCriteria termcrit )
{
    CvMat* centers = 0;
    CvMat* old_centers = 0;
    CvMat* counters = 0;

    CV_FUNCNAME( "cvKMeans2" );

    __BEGIN__;

    CvMat samples_stub, labels_stub;
    CvMat* samples = (CvMat*)samples_arr;
    CvMat* labels = (CvMat*)labels_arr;
    CvMat* temp = 0;
    CvRNG rng = CvRNG(-1);
    int i, j, k, sample_count, dims;
    int ids_delta, iter;
    double max_dist;

    if( !CV_IS_MAT( samples ))
        CV_CALL( samples = cvGetMat( samples, &samples_stub ));

    if( !CV_IS_MAT( labels ))
        CV_CALL( labels = cvGetMat( labels, &labels_stub ));

    if( cluster_count < 1 )
        CV_ERROR( CV_StsOutOfRange, "Number of clusters should be positive" );

    if( CV_MAT_DEPTH(samples->type) != CV_32F || CV_MAT_TYPE(labels->type) != CV_32SC1 )
        CV_ERROR( CV_StsUnsupportedFormat,
        "samples should be floating-point matrix, cluster_idx - integer vector" );

    if( labels->rows != 1 && (labels->cols != 1 || !CV_IS_MAT_CONT(labels->type)) ||
        labels->rows + labels->cols - 1 != samples->rows )
        CV_ERROR( CV_StsUnmatchedSizes,
        "cluster_idx should be 1D vector of the same number of elements as samples' number of rows" );

    CV_CALL( termcrit = cvCheckTermCriteria( termcrit, 1e-6, 100 ));
    termcrit.epsilon *= termcrit.epsilon;   // squared, compared against squared shifts

    sample_count = samples->rows;
    if( cluster_count > sample_count )
        cluster_count = sample_count;
    dims = samples->cols*CV_MAT_CN(samples->type);
    // element stride for row-vector labels with a non-trivial step
    ids_delta = labels->step ? labels->step/(int)sizeof(int) : 1;

    CV_CALL( centers = cvCreateMat( cluster_count, dims, CV_64FC1 ));
    CV_CALL( old_centers = cvCreateMat( cluster_count, dims, CV_64FC1 ));
    CV_CALL( counters = cvCreateMat( 1, cluster_count, CV_32SC1 ));

    // init centers: random initial labels, centers derived on the first pass
    // NOTE(review): this loop (and the final counting loop below) index labels
    // with plain i, while the main loops use i*ids_delta — presumably fine for
    // contiguous labels; TODO confirm for strided label vectors.
    for( i = 0; i < sample_count; i++ )
        labels->data.i[i] = cvRandInt(&rng) % cluster_count;

    counters->cols = cluster_count; // cut down counters
    max_dist = termcrit.epsilon*2;

    for( iter = 0; iter < termcrit.max_iter; iter++ )
    {
        // compute centers: accumulate samples per cluster, then normalize
        cvZero( centers );
        cvZero( counters );

        for( i = 0; i < sample_count; i++ )
        {
            float* s = (float*)(samples->data.ptr + i*samples->step);
            k = labels->data.i[i*ids_delta];
            double* c = (double*)(centers->data.ptr + k*centers->step);

            // 4-way unrolled accumulation
            for( j = 0; j <= dims - 4; j += 4 )
            {
                double t0 = c[j] + s[j];
                double t1 = c[j+1] + s[j+1];
                c[j] = t0;
                c[j+1] = t1;
                t0 = c[j+2] + s[j+2];
                t1 = c[j+3] + s[j+3];
                c[j+2] = t0;
                c[j+3] = t1;
            }
            for( ; j < dims; j++ )
                c[j] += s[j];
            counters->data.i[k]++;
        }

        if( iter > 0 )
            max_dist = 0;

        for( k = 0; k < cluster_count; k++ )
        {
            double* c = (double*)(centers->data.ptr + k*centers->step);
            if( counters->data.i[k] != 0 )
            {
                double scale = 1./counters->data.i[k];
                for( j = 0; j < dims; j++ )
                    c[j] *= scale;
            }
            else
            {
                // empty cluster: reseed its center from a random sample
                i = cvRandInt( &rng ) % sample_count;
                float* s = (float*)(samples->data.ptr + i*samples->step);
                for( j = 0; j < dims; j++ )
                    c[j] = s[j];
            }

            if( iter > 0 )
            {
                // track the largest squared center displacement for convergence
                double dist = 0;
                double* c_o = (double*)(old_centers->data.ptr + k*old_centers->step);
                for( j = 0; j < dims; j++ )
                {
                    double t = c[j] - c_o[j];
                    dist += t*t;
                }
                if( max_dist < dist )
                    max_dist = dist;
            }
        }

        // assign labels: nearest center in squared Euclidean distance
        for( i = 0; i < sample_count; i++ )
        {
            float* s = (float*)(samples->data.ptr + i*samples->step);
            int k_best = 0;
            double min_dist = DBL_MAX;

            for( k = 0; k < cluster_count; k++ )
            {
                double* c = (double*)(centers->data.ptr + k*centers->step);
                double dist = 0;

                j = 0;
                for( ; j <= dims - 4; j += 4 )
                {
                    double t0 = c[j] - s[j];
                    double t1 = c[j+1] - s[j+1];
                    dist += t0*t0 + t1*t1;
                    t0 = c[j+2] - s[j+2];
                    t1 = c[j+3] - s[j+3];
                    dist += t0*t0 + t1*t1;
                }

                for( ; j < dims; j++ )
                {
                    double t = c[j] - s[j];
                    dist += t*t;
                }

                if( min_dist > dist )
                {
                    min_dist = dist;
                    k_best = k;
                }
            }

            labels->data.i[i*ids_delta] = k_best;
        }

        if( max_dist < termcrit.epsilon )
            break;

        CV_SWAP( centers, old_centers, temp );
    }

    cvZero( counters );
    for( i = 0; i < sample_count; i++ )
        counters->data.i[labels->data.i[i]]++;

    // ensure that we do not have empty clusters: steal a sample from a
    // cluster that has more than one member
    for( k = 0; k < cluster_count; k++ )
        if( counters->data.i[k] == 0 )
            for(;;)
            {
                i = cvRandInt(&rng) % sample_count;
                j = labels->data.i[i];
                if( counters->data.i[j] > 1 )
                {
                    labels->data.i[i] = k;
                    counters->data.i[j]--;
                    counters->data.i[k]++;
                    break;
                }
            }

    __END__;

    // BUG FIX: was "cvReleaseMat( ¢ers )" — mis-encoded "&centers"
    // (HTML entity &cent; swallowed the ampersand); did not compile.
    cvReleaseMat( &centers );
    cvReleaseMat( &old_centers );
    cvReleaseMat( &counters );
}