Esempio n. 1
0
/**
 * Projects the per-pixel feature vectors in pMat onto their dominant
 * principal components.
 *
 * @param pMat          input matrix: one nSize-dim sample per pixel,
 *                      m_nWidth*m_nHeight rows of row-major float data
 * @param pResultMat    output matrix receiving the projected samples,
 *                      (m_nWidth*m_nHeight) x nExpectedSize, preallocated
 * @param nSize         dimensionality of each input sample
 * @param nExpectedSize number of dominant eigenvectors to keep
 * @return true always (no failure paths are reported)
 */
bool CFeatureExtraction::DoPCA(CvMat * pMat, CvMat * pResultMat, int nSize, int nExpectedSize)
{
	printf("\nCFeatureExtraction::DoPCA in\n");
	int i;

	printf("DoPCA: Sort our data sets in a vector each\n");
	// Wrap each sample (row of pMat) in its own CvMat header so that
	// cvCalcCovarMatrix can treat the data as an array of vectors.  The
	// headers only alias pMat's data; they own no pixel storage.
	CvMat ** pDataSet = new CvMat*[m_nWidth*m_nHeight];
	float * pData = pMat->data.fl;
	for (i=0;i<m_nWidth*m_nHeight;i++)
	{
		pDataSet[i] = (CvMat*) malloc(sizeof(CvMat));
		cvInitMatHeader(pDataSet[i], 1, nSize, CV_32FC1, &pData[i*nSize]);
	}

	printf("DoPCA: Calc covariance matrix\n");
	// Normal (scaled) covariance of all samples; the mean vector is an
	// output of cvCalcCovarMatrix that we do not need afterwards.
	CvMat* pCovMat = cvCreateMat( nSize, nSize, CV_32F );
	CvMat* pMeanVec = cvCreateMat( 1, nSize, CV_32F );

	cvCalcCovarMatrix( (const void **)pDataSet, m_nWidth*m_nHeight, pCovMat, pMeanVec, CV_COVAR_SCALE | CV_COVAR_NORMAL );

	cvReleaseMat(&pMeanVec);

	// The sample headers are no longer needed once the covariance exists.
	// BUG FIX: these were malloc'ed but released with delete[] (undefined
	// behavior: mismatched allocator) — release them with free() instead.
	for (i = 0; i < m_nWidth*m_nHeight; i++)
		free(pDataSet[i]);
	delete [] pDataSet;

	printf("DoPCA: Do the SVD decomposition\n");
	// SVD of the covariance matrix: the columns of U are the eigenvectors,
	// W holds the singular values (eigenvalues, sorted descending).
	CvMat* pMatW = cvCreateMat( nSize, 1, CV_32F );
	CvMat* pMatV = cvCreateMat( nSize, nSize, CV_32F );
	CvMat* pMatU = cvCreateMat( nSize, nSize, CV_32F );

	cvSVD(pCovMat, pMatW, pMatU, pMatV, CV_SVD_MODIFY_A+CV_SVD_V_T);

	cvReleaseMat(&pCovMat);
	cvReleaseMat(&pMatW);
	cvReleaseMat(&pMatV);

	printf("DoPCA: Extract the requested number of dominant eigen vectors\n");
	// Copy the first nExpectedSize columns of U (row by row) into the
	// projection basis.
	CvMat* pEigenVecs = cvCreateMat( nSize, nExpectedSize, CV_32F );
	for (i=0;i<nSize;i++)
		memcpy(&pEigenVecs->data.fl[i*nExpectedSize], &pMatU->data.fl[i*nSize], nExpectedSize*sizeof(float));

	printf("DoPCA: Transform to the new basis\n");
	// Project every sample onto the reduced basis in one matrix multiply.
	cvMatMul(pMat, pEigenVecs, pResultMat);
	cvReleaseMat(&pMatU);
	cvReleaseMat(&pEigenVecs);

	printf("CFeatureExtraction::DoPCA out\n");
	return true;
}
Esempio n. 2
0
File: mlem.cpp Progetto: glo/ee384b
// Initializes the EM model parameters (means, covs, weights, probs) from the
// training samples before iteration begins.  Two cases:
//   * nclusters == nsamples: degenerate — every sample becomes its own
//     cluster center with zero covariance and uniform weight.
//   * otherwise: samples are pre-clustered (k-means when nclusters > 1) and
//     per-cluster means/covariances/weights are estimated.
// Uses the legacy OpenCV CV_FUNCNAME/__BEGIN__/__END__ error-handling macros;
// all temporaries are released after __END__ on both success and error paths.
void CvEM::init_auto( const CvVectors& train_data )
{
    CvMat* hdr = 0;
    const void** vec = 0;
    CvMat* class_ranges = 0;
    CvMat* labels = 0;

    CV_FUNCNAME( "CvEM::init_auto" );

    __BEGIN__;

    int nclusters = params.nclusters, nsamples = train_data.count, dims = train_data.dims;
    int i, j;

    if( nclusters == nsamples )
    {
        // One cluster per sample: copy each float sample into the (double)
        // means matrix, zero the covariances, identity rotations.
        CvMat src = cvMat( 1, dims, CV_32F );
        CvMat dst = cvMat( 1, dims, CV_64F );
        for( i = 0; i < nsamples; i++ )
        {
            src.data.ptr = train_data.data.ptr[i];
            dst.data.ptr = means->data.ptr + means->step*i;
            cvConvert( &src, &dst );
            cvZero( covs[i] );
            cvSetIdentity( cov_rotate_mats[i] );
        }
        // Identity probs: sample i belongs to cluster i with probability 1.
        cvSetIdentity( probs );
        cvSet( weights, cvScalar(1./nclusters) );
    }
    else
    {
        int max_count = 0;

        // class_ranges[i] .. class_ranges[i+1] will delimit cluster i's
        // samples after sorting by label.
        CV_CALL( class_ranges = cvCreateMat( 1, nclusters+1, CV_32SC1 ));
        if( nclusters > 1 )
        {
            CV_CALL( labels = cvCreateMat( 1, nsamples, CV_32SC1 ));
            // Pre-cluster with k-means; if the caller supplied initial means,
            // a single iteration is run (presumably just to assign labels —
            // TODO confirm against kmeans implementation).
            kmeans( train_data, nclusters, labels, cvTermCriteria( CV_TERMCRIT_ITER,
                    params.means ? 1 : 10, 0.5 ), params.means );
            // Group sample pointers by class label and record class ranges.
            CV_CALL( cvSortSamplesByClasses( (const float**)train_data.data.fl,
                                            labels, class_ranges->data.i ));
        }
        else
        {
            // Single cluster: all samples belong to it.
            class_ranges->data.i[0] = 0;
            class_ranges->data.i[1] = nsamples;
        }

        // Size the scratch arrays for the largest cluster.
        for( i = 0; i < nclusters; i++ )
        {
            int left = class_ranges->data.i[i], right = class_ranges->data.i[i+1];
            max_count = MAX( max_count, right - left );
        }
        // hdr[j] are 1 x dims CV_32F headers aliasing individual samples;
        // vec[j] point at them so cvCalcCovarMatrix can consume the set.
        CV_CALL( hdr = (CvMat*)cvAlloc( max_count*sizeof(hdr[0]) ));
        CV_CALL( vec = (const void**)cvAlloc( max_count*sizeof(vec[0]) ));
        hdr[0] = cvMat( 1, dims, CV_32F );
        for( i = 0; i < max_count; i++ )
        {
            vec[i] = hdr + i;
            hdr[i] = hdr[0];
        }

        for( i = 0; i < nclusters; i++ )
        {
            int left = class_ranges->data.i[i], right = class_ranges->data.i[i+1];
            int cluster_size = right - left;
            CvMat avg;

            // NOTE(review): an empty cluster leaves covs[i] and
            // weights->data.db[i] untouched (whatever they held before).
            if( cluster_size <= 0 )
                continue;

            // Point the reusable headers at this cluster's samples.
            for( j = left; j < right; j++ )
                hdr[j - left].data.fl = train_data.data.fl[j];

            // avg views row i of means; cvCalcCovarMatrix writes the cluster
            // mean into it (CV_COVAR_USE_AVG is not set) and the scaled
            // normal covariance into covs[i].
            CV_CALL( cvGetRow( means, &avg, i ));
            CV_CALL( cvCalcCovarMatrix( vec, cluster_size, covs[i],
                &avg, CV_COVAR_NORMAL | CV_COVAR_SCALE ));
            // Mixture weight proportional to cluster population.
            weights->data.db[i] = (double)cluster_size/(double)nsamples;
        }
    }

    __END__;

    cvReleaseMat( &class_ranges );
    cvReleaseMat( &labels );
    cvFree( &hdr );
    cvFree( &vec );
}
Esempio n. 3
0
/**
 * Builds per-gesture statistical models from Fourier-descriptor features.
 *
 * For each of the numGestures gestures (directory indices in indx[]), loads
 * numTraining images named "../../../TrainingData/GG-SS.png", extracts
 * numFeatures Fourier descriptors per image via computeFDFeatures(), and
 * computes for gesture i:
 *   Means[i]      - 1 x numFeatures mean vector (output of cvCalcCovarMatrix)
 *   eigenVects[i] - numFeatures x numFeatures eigenvector matrix
 *   eigenVals[i]  - 1 x numFeatures eigenvalues
 * of the scaled normal covariance of the training samples.  The output
 * matrices are allocated here and owned by the caller.  Debug dumps of the
 * covariance/eigen data go to stderr.
 */
void train(CvArr **Means,CvArr **eigenVects,CvArr ** eigenVals,int numGestures,int numTraining,int numFeatures, int indx[]) {

	char num[] ="01-01";	// "GG-SS" gesture/sample fragment, patched in place
	char *fo = ".png";
	char filename[] = "../../../TrainingData/01-01.png";
	int i,j,k;
	double *Features;
	IplImage* src;

	// Scratch covariance matrix, reused across gestures.
	CvArr *tmpCov = cvCreateMat(numFeatures,numFeatures,CV_64FC1);
	// One 1 x numFeatures row vector per training sample.
	CvArr **tmp= (CvArr **)malloc(sizeof(CvArr *)*numTraining);
	for(k=0; k<numTraining; k++)
		tmp[k] = cvCreateMat(1,numFeatures,CV_64FC1);

	for(i=0; i< numGestures; i++) {
		Means[i] = cvCreateMat(1,numFeatures,CV_64FC1);

		for(j=0;j<numTraining; j++) {

			// Truncate back to the directory prefix ("../../../TrainingData/")
			// before appending this sample's "GG-SS.png" file name.
			filename[22] = '\0';

			num[0] = '0'+indx[i]/10 ;
			num[1] = '0'+indx[i]%10;

			if((j+1)>9) {
				num[3] = '0'+(j+1)/10;
				num[4] = '0'+(j+1)%10;
				num[5] = '\0';
			}
			else {
				num[3] = '0'+j+1;
				num[4] = '\0';
			}
			strcat(filename,num);
			strcat(filename,fo);

			src = cvLoadImage( filename,CV_LOAD_IMAGE_GRAYSCALE );
			Features=computeFDFeatures(src,numFeatures);
			// Copy this sample's features into row vector tmp[j].
			for(k=0; k < numFeatures; k++)
				*( (double*)CV_MAT_ELEM_PTR( *((CvMat *)tmp[j]),0,k ) ) = Features[k];
			free(Features);
			// BUG FIX: release was commented out, leaking one image per sample.
			cvReleaseImage( &src );
		}

		// Means[i] is an output here: cvCalcCovarMatrix computes the sample
		// mean (CV_COVAR_USE_AVG not set) along with the scaled covariance.
		cvCalcCovarMatrix( (const CvArr **)tmp, numTraining, tmpCov, Means[i],CV_COVAR_SCALE|CV_COVAR_NORMAL);
		fprintf(stderr,"%d\n",i);
		for(k=0;k<numFeatures;k++) {
			for(j=0;j<numFeatures;j++)
				fprintf(stderr," %lf ",*( (double*)CV_MAT_ELEM_PTR( *((CvMat *)tmpCov),k,j ) ));
			fprintf(stderr,";\n");
		}

		eigenVects[i]=cvCreateMat(numFeatures,numFeatures,CV_64FC1);
		eigenVals[i]=cvCreateMat(1,numFeatures,CV_64FC1);
		cvEigenVV(tmpCov,eigenVects[i],eigenVals[i],0);

		fprintf(stderr,"Eigenvalues:\n");
		for(k=0;k<numFeatures;k++)
		{
			fprintf(stderr," %lf ",*( (double*)CV_MAT_ELEM_PTR( *((CvMat *)eigenVals[i]),0,k ) ));
		}
		fprintf(stderr,";\n");
		for(k=0;k<numFeatures;k++) {
			for(j=0;j<numFeatures;j++)
				fprintf(stderr," %lf ",*( (double*)CV_MAT_ELEM_PTR( *((CvMat *)eigenVects[i]),k,j ) ));
			fprintf(stderr,";\n");
		}
	}

	// BUG FIX: release scratch storage (previously leaked).
	for(k=0; k<numTraining; k++)
		cvReleaseMat((CvMat**)&tmp[k]);
	free(tmp);
	cvReleaseMat((CvMat**)&tmpCov);
}
Esempio n. 4
0
int main (int argc, char *argv[])
{
	IplImage *rgb, *depth, *tmp, *body, *hand;
	CvMat **tr, *mean, *cov;
	CvFileStorage *fs;
	int count=0, p=0, warmup=1;

	parse_args(argc, argv);

	rgb = cvCreateImage(cvSize(W,H), 8, 3);
	tr = (CvMat**)malloc(sizeof(CvMat*)*num);
	init_training_data(tr,num);
	mean = cvCreateMat(1, FD_NUM, CV_64FC1);
	cov  = cvCreateMat(FD_NUM, FD_NUM, CV_64FC1);

	fs = cvOpenFileStorage(outfile, NULL, CV_STORAGE_WRITE, NULL);
	assert(fs);

	for (;;) {
		int z, k, c;
		CvMat *fd;
		CvSeq *cnt;
		
		tmp = freenect_sync_get_rgb_cv(0);
		cvCvtColor(tmp, rgb, CV_BGR2RGB);
		depth = freenect_sync_get_depth_cv(0);
		body = body_detection(depth);
		hand = hand_detection(body, &z);

		if (!get_hand_contour_advanced(hand, rgb, z, &cnt, NULL))
		 	continue; 

		if (warmup) {
			draw_contour(cnt);
			if (k = cvWaitKey(T) == 'g') {
				warmup = 0;
				cvDestroyAllWindows();
			}
			continue;
		}

		fd = get_fourier_descriptors(cnt);
		add_training_data(tr[count], fd);

		if (count == 0)
			printf("---> training hand pose %d\n", p);

		if (++count == num) {
			int c;

			cvCalcCovarMatrix((void*)tr, count, cov, mean, CV_COVAR_NORMAL);
			cvInvert(cov, cov, CV_LU);
			save_posture_model(fs, mean, cov);
			p++;
			count = 0;

			printf("save and quit:s  exit:q  next:any \n");
			
			if ((c = cvWaitKey(0)) == 's') {
				break;
			} else if (c == 'q') {
				break;
			} else {
				continue;
			}
		}

		draw_contour(cnt);
		cvWaitKey(T);
	}

	cvWriteInt(fs, "total", p);

	freenect_sync_stop();

	free_training_data(tr,num);
	cvReleaseFileStorage(&fs);
	cvReleaseMat(&mean);
	cvReleaseMat(&cov);
	cvReleaseImage(&rgb);

	return 0;
}
// Fits Gaussian mixture models with k = minComponents..maxComponents clusters
// to the member data m_pCvData (m_nObservations x m_nVariables, CV_32FC1) and
// selects the best k by minimum BIC.  Trained models are stored in the member
// array emModel (one CvEM per candidate k); the winning index goes into
// bestModel, with m_BIC / m_Likelihood recording its scores.  Sets bModeled
// on completion.
// NOTE(review): regularizingFactor and stoppingThreshold are currently
// unused; emModel is reassigned without deleting a previous allocation —
// calling this twice leaks.  TODO confirm both against the class's usage.
void pkmGaussianMixtureModel::modelData(int minComponents, int maxComponents, 
										double regularizingFactor, double stoppingThreshold)
{
	
	// indicator will contain the assignments of each data point to
	// the mixture components, as result of the E-step
	//	double * indicator = new double[k * m_nObservations];
	////////////////////////////////////////////////////////////
	
	
	
	////////////////////////////////////////////////////////////
	//
	//	Use as an initial approxiamation, a diagonal covariance matrix
	//	taken from the mean covariances
	//	Could instead use K-Means (see opencv function, kmeans2)
	//
	//	Alternatively, the algorithm may start with M-step when 
	//	initial values for pi,k can be provided. Another alternative, 
	//	when pi,k are unknown, is to use a simpler clustering algorithm 
	//	to pre-cluster the input samples and thus obtain initial pi,k. 
	//	Often (and in ML) k-means algorithm is used for that purpose.
	//
	//	One of the main that EM algorithm should deal with is the large 
	//	number of parameters to estimate. The majority of the parameters 
	//	sits in covariation matrices, which are d×d elements each 
	//	(where d is the feature space dimensionality). However, in many 
	//	practical problems the covariation matrices are close to diagonal, 
	//	or even to μk*I, where I is identity matrix and μk is 
	//	mixture-dependent "scale" parameter. So a robust computation 
	//	scheme could be to start with the harder constraints on the 
	//	covariation matrices and then use the estimated parameters as an 
	//	input for a less constrained optimization problem (often a 
	//	diagonal covariation matrix is already a good enough approximation).
	//
	//	References:
	//
	//	1. [Bilmes98] J. A. Bilmes. A Gentle Tutorial of the EM Algorithm 
	//	and its Application to Parameter Estimation for Gaussian Mixture 
	//	and Hidden Markov Models. Technical Report TR-97-021, 
	//	International Computer Science Institute and Computer Science 
	//	Division, University of California at Berkeley, April 1998.
	
	//// This code is for indexing (observations x variables) 
	
	// One CvEM model per candidate cluster count.
	emModel = new CvEM[maxComponents-minComponents+1];
	
	////////////////////////////////////////////////////////////
	// EM
	int i;
	double minBIC = HUGE_VAL;
	// Clamp the candidate range so k never reaches the number of
	// observations (EM would be degenerate).
	if(maxComponents >= m_nObservations)
	{
		maxComponents = m_nObservations-1;
	}
	if(minComponents > maxComponents)
	{
		minComponents = maxComponents = m_nObservations-1;
	}
	for (int k = minComponents; k <= maxComponents; k++)
	{
#if 0
		// Dead code: manual initialization path (random means, shared
		// diagonal covariances, uniform weights) — disabled in favor of
		// CvEM's START_AUTO_STEP below.  Kept for reference; left as-is.
		//////////////////////////////////////////////////////////////
		// Create a list of random indexes from 1 : K 
		// from the permutations of the number of observations
		int * randIndex = new int[m_nObservations];
		
		// 1:N
		for (i = 0; i < m_nObservations; i++)
			randIndex[i] = i;
		// Shuffle the array
		for (i = 0; i < (m_nObservations-1); i++) 
		{
			// Random position
			int r = i + (rand() % (m_nObservations-i)); 
			// Swap
			int temp = randIndex[i]; randIndex[i] = randIndex[r]; randIndex[r] = temp;
		}
		//////////////////////////////////////////////////////////////
		
		////////////////////////////////////////////////////////////
		// Random initial kernels
		float * estMU = new float[k*m_nVariables];
		for( int row = 0; row < k; row++ )
		{	
			int ind = randIndex[row];
			for( int col = 0; col < m_nVariables; col++ )
			{
				// Get each variable at index ind (of the random kernels)
				// from the input data into estMu
				estMU[row*m_nVariables+col] = ((float*)(m_pCvData->data.ptr + m_pCvData->step*ind))[col];
			}
		}
		CvMat param_mean;
		cvInitMatHeader(&param_mean, k, m_nVariables, CV_32FC1, estMU);
		////////////////////////////////////////////////////////////
		
		////////////////////////////////////////////////////////////
		// Calculate the Covariance matrix (assume this is a 2x2 Matrix)
		CvMat *m_pCvCov = cvCreateMat(m_nVariables, m_nVariables, CV_32FC1);
		CvMat *m_pCvMu = cvCreateMat(m_nVariables, 1, CV_32FC1);
		CvMat **dat = (CvMat**)cvAlloc( m_nObservations * sizeof(*dat) );
		for (i = 0; i < m_nObservations; i++)
		{
			// NOTE(review): only the first two variables are copied —
			// hard-codes m_nVariables == 2.
			CvMat *tempData = cvCreateMat(m_nVariables, 1, CV_32FC1);
			CV_MAT_ELEM(*tempData, float, 0, 0) = CV_MAT_ELEM(*m_pCvData, float, i, 0);
			CV_MAT_ELEM(*tempData, float, 1, 0) = CV_MAT_ELEM(*m_pCvData, float, i, 1);
			dat[i] = tempData;
		}
		cvCalcCovarMatrix((const CvArr**)dat, m_nObservations, m_pCvCov, 
						  m_pCvMu, CV_COVAR_NORMAL);	//|CV_COVAR_SCALE);
		
		// Store k (all axes) Matrices of Diagonal Covariance Matrices 
		// initialized to 1/10th of the max of the diag values 
		// of the mean variance as the estimated covariances
		CvMat **param_cov = (CvMat**)cvAlloc( k * sizeof(*param_cov) );
		float covMax = MAX(CV_MAT_ELEM(*m_pCvCov, float, 0, 0), CV_MAT_ELEM(*m_pCvCov, float, 1, 1)) / 10.;
		for (int kern = 0; kern < k; kern++)
		{
			CvMat *tempData = cvCreateMat(m_nVariables, m_nVariables, CV_32FC1);
			CV_MAT_ELEM(*tempData, float, 0, 0) = covMax;
			CV_MAT_ELEM(*tempData, float, 0, 1) = 0.0f;
			CV_MAT_ELEM(*tempData, float, 1, 0) = 0.0f;
			CV_MAT_ELEM(*tempData, float, 1, 1) = covMax;
			param_cov[kern] = tempData;
		}
		////////////////////////////////////////////////////////////
		
		////////////////////////////////////////////////////////////
		// Random mixing probabilities for each kernel
		float * estPP = new float[k];
		for (i = 0; i < k; i++)
		{
			estPP[i] = 1.0/(float)k;
		}
		// Weights for each kernel
		CvMat param_weight;
		cvInitMatHeader(&param_weight, k, 1, CV_32FC1, estPP);
		////////////////////////////////////////////////////////////
		
		////////////////////////////////////////////////////////////
		float *estProb = new float[k*m_nObservations];
		for (i = 0; i < k; i++)
		{
			for(int j = 0; j < m_nObservations; j++)
			{
				// NOTE(review): index looks wrong — likely meant
				// i*m_nObservations+j.  Dead code (#if 0), left as-is.
				estProb[i*j] = estPP[i] / 2.0;
			}
		}
		// Create a Cv Matrix for the mix prob
		CvMat param_prob;
		cvInitMatHeader(&param_prob, m_nObservations, k, CV_32FC1, estProb);
		////////////////////////////////////////////////////////////
		
		
		
		// Initialize parameters
		CvEMParams emParam;
		emParam.covs = (const CvMat **)param_cov;
		emParam.means = &param_mean;
		emParam.weights = &param_weight;
		emParam.probs = NULL;//&param_prob;
		emParam.nclusters = k+1;
		emParam.cov_mat_type = CvEM::COV_MAT_GENERIC;//CvEM::COV_MAT_DIAGONAL;////CvEM::COV_MAT_SPHERICAL;
		emParam.start_step = CvEM::START_E_STEP; //CvEM::START_AUTO_STEP;		// initialize with k-means
		emParam.term_crit.epsilon = 0.00001;
		emParam.term_crit.max_iter = 50;
		emParam.term_crit.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
		
		// Train
		emModel[k-minComponents].train(m_pCvData, 0, emParam, 0);
		
		double thisLikelihood = emModel[k-minComponents].get_log_likelihood();
		//double BIC = -2.*thisLikelihood - (double)k*log((double)m_nObservations*10);
		double BIC = -m_nObservations*thisLikelihood + k/2.*log((double)m_nObservations);
		printf("K: %d, BIC: %f\n", k, BIC);
		// minLikelihood / bestModel are class members in this dead path.
		if (BIC < minLikelihood)
		{
			bestModel = k-minComponents;
			minLikelihood = BIC;
		}
		
		delete [] randIndex;
		delete [] estMU;
		delete [] estPP;
#else
		// Live path: let CvEM choose its own starting point (internal
		// k-means via START_AUTO_STEP); covariance structure comes from the
		// member m_covType.
		CvEMParams emParam;
		emParam.covs = NULL;
		emParam.means = NULL;
		emParam.weights = NULL;
		emParam.probs = NULL;
		emParam.nclusters = k;
		emParam.cov_mat_type = m_covType;//CvEM::COV_MAT_SPHERICAL;//CvEM::COV_MAT_DIAGONAL;////CvEM::COV_MAT_GENERIC;//;
		emParam.start_step = CvEM::START_AUTO_STEP; //CvEM::START_AUTO_STEP;		// initialize with k-means
		emParam.term_crit.epsilon = 0.01;
		emParam.term_crit.max_iter = 100;
		emParam.term_crit.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
		
		
		// Train
		emModel[k-minComponents].train(m_pCvData, 0, emParam, 0);
		
		// Calculate the log likelihood of the model
		const CvMat *weights = emModel[k-minComponents].get_weights();
		const CvMat *probs = emModel[k-minComponents].get_probs();
		const CvMat **modelCovs = emModel[k-minComponents].get_covs();
		const CvMat *modelMus = emModel[k-minComponents].get_means();
		const CvMat *modelWeights = emModel[k-minComponents].get_weights();
		
		double thisLikelihood;
		if(k == 1)
			// mlem.cpp does not calculate the log_likelihood for 1 cluster 
			// (why i have no idea?! it sets log_likelihood = DBL_MAX/1000.;!?)
			// so i compute it here.  though this seems to pair up with the 
			// same value you get for 2 kernels, it does not pair up for 
			// anything higher?
		{
			// Manual log-likelihood: sum over samples of log of the
			// weighted mixture density.
			// NOTE(review): only dimensions 0 and 1 of each sample/mean are
			// set below — this assumes m_nVariables == 2.  TODO confirm.
			double _log_likelihood = 0;//-CV_LOG2PI * (double)m_nObservations * (double)m_nVariables / 2.;
			CvMat *pts = cvCreateMat(m_nVariables, 1, CV_64FC1);
			CvMat *mean = cvCreateMat(m_nVariables, 1, CV_64FC1);
			
			for( int n = 0; n < m_nObservations; n++ )
			{
				double sum = 0;
				cvmSet(pts, 0, 0, cvmGet(m_pCvData, n, 0));
				cvmSet(pts, 1, 0, cvmGet(m_pCvData, n, 1));
				double* pp = (double*)(probs->data.ptr + probs->step*n);
				
				for( int d = 0; d < k; d++ )
				{
					const CvMat * covar = modelCovs[d];
					
					cvmSet(mean, 0, 0, cvmGet(modelMus, d, 0));
					cvmSet(mean, 1, 0, cvmGet(modelMus, d, 1));
					
					// Weighted component density at this sample.
					double p_x = multinormalDistribution(pts, mean, covar);
					double w_k = cvmGet(weights, 0, d);
					sum += p_x * w_k;// * pp[d];
					//printf("%f + %f += %f\n", p_x, w_k, sum);
				}
				
				_log_likelihood -= log(sum);
			}
			thisLikelihood = -_log_likelihood;//emModel[k-minComponents].get_log_likelihood();
		}
		else
		{
			thisLikelihood = emModel[k-minComponents].get_log_likelihood();
		}
		
		// Calculate the Bit Information Criterion for Model Selection
		// N_p = free parameters: (k-1) weights + k means (d each) + k
		// symmetric covariances (d(d+1)/2 each).
		double vars = (double)m_nVariables; 
		double N_p = ((double)k-1.)+(double)k*(vars + vars*(vars+1.)/2.);
		double BIC = -2.*thisLikelihood + N_p*log((double)m_nObservations);
		//printf("K: %d, like: %f, BIC: %f\n", k, thisLikelihood, BIC);
		if (BIC < minBIC)
		{
			// update variables with the best bic and best model subscript
			bestModel = k-minComponents;
			minBIC = BIC;
			
			// store the bic and likelihood for printing later
			m_BIC = BIC;
			m_Likelihood = thisLikelihood;
		}
		
#endif
		
	}
	// Mark the model set as ready for querying.
    bModeled = true;
	//	m_pCvProb = emModel.get_probs;
	
	
}