Example No. 1
FT Scene::optimize_positions_via_gradient_ascent(FT timestep, bool update)
{
    std::vector<Point> points;
    collect_visible_points(points);

    std::vector<Vector> gradient;
    compute_position_gradient(gradient);

    if (timestep <= 0.0)
    {
        // No explicit timestep: choose one by backtracking line search,
        // capped at the inverse of the mean cell capacity.
        double mean_capacity = compute_mean(m_capacities);
        double max_alpha = 1.0 / mean_capacity;
        LSPositions line_search(this, 10, max_alpha);
        timestep = line_search.run_bt(points, gradient);
    } else {
        // Explicit timestep: take a single gradient-ascent step per point.
        for (unsigned i = 0; i < points.size(); ++i)
        {
            Point  pi = points[i];
            Vector gi = gradient[i];
            points[i] = pi + timestep*gi;
        }
        update_positions(points);
        if (update) update_triangulation();
    }

    // Return the gradient norm at the new positions as a convergence measure.
    compute_position_gradient(gradient);
    return compute_norm(gradient);
}
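The fallback branch is the interesting part: when no explicit timestep is supplied, the step size comes from a backtracking line search capped at 1/mean-capacity. A minimal standalone sketch of the backtracking idea in plain C; the 1-D objective f and the halving schedule are hypothetical stand-ins for the scene energy and LSPositions::run_bt:

#include <stdio.h>

/* Hypothetical 1-D objective and derivative standing in for the scene
   energy and its position gradient. Maximum is at x = 3. */
static double f(double x)  { return -(x - 3.0) * (x - 3.0); }
static double df(double x) { return -2.0 * (x - 3.0); }

/* Backtracking: start from max_alpha and halve until the objective
   actually improves (the role run_bt plays above). */
static double backtrack(double x, double max_alpha, int max_tries)
{
    double g = df(x), alpha = max_alpha;
    for (int i = 0; i < max_tries; i++) {
        if (f(x + alpha * g) > f(x))
            return alpha;
        alpha *= 0.5;
    }
    return 0.0; /* no improving step found */
}

int main(void)
{
    double x = 0.0;
    for (int it = 0; it < 20; it++)
        x += backtrack(x, 1.0, 10) * df(x);
    printf("x = %f (expected 3)\n", x);
    return 0;
}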
Example No. 2
float compute_variance()
{
    float mean = compute_mean();
    float sum = 0;
    for (int i = 0; i < get_current_size(); i++)
    {
        float delta = buffer[i] - mean;
        sum += delta * delta;
    }
    // Mean squared deviation is the (population) variance; the original
    // applied sqrt() here, which yields the standard deviation instead.
    return sum / get_current_size();
}
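For reference, the two quantities are easy to conflate: the standard deviation is the square root of the variance. A standalone sketch in plain C, with a hypothetical data/n pair in place of the example's buffer and get_current_size():

#include <math.h>

/* Population variance of n samples about a precomputed mean. */
static double variance(const double *data, int n, double mean)
{
    double sum = 0.0;
    for (int i = 0; i < n; i++) {
        double delta = data[i] - mean;
        sum += delta * delta;
    }
    return sum / n;
}

/* The standard deviation is then just the square root. */
static double std_dev(const double *data, int n, double mean)
{
    return sqrt(variance(data, n, mean));
}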
Example No. 3
//unbiased estimator using size - 1 instead of size (Bessel's correction)
double compute_std(long *dt, int size) {
	double delta, mean, std, var = 0;
	mean = compute_mean(dt, size);
	for (long *dt_ptr = dt; dt_ptr < dt + size; dt_ptr++) {
		delta = mean - (double) *dt_ptr;
		var += delta * delta / (double) (size - 1);
	}
	std = sqrt(var);
	return std;
}
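Dividing by size - 1 instead of size compensates for the sample mean being estimated from the same data, which otherwise biases the variance low. A usage sketch, assuming compute_std above is in scope; compute_mean is written out as an assumed helper matching the signature used there:

#include <stdio.h>

double compute_std(long *dt, int size); /* the function above */

/* Assumed helper matching the signature used above. */
double compute_mean(long *dt, int size)
{
    double sum = 0.0;
    for (int i = 0; i < size; i++)
        sum += (double) dt[i];
    return sum / size;
}

int main(void)
{
    long samples[] = {2, 4, 4, 4, 5, 5, 7, 9};
    /* mean is 5 and the corrected variance is 32/7, so this prints
       roughly 2.138 */
    printf("std = %f\n", compute_std(samples, 8));
    return 0;
}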
Example No. 4
	void compute_mean_and_covariance() {
		// Mean of the sigma points: pose on SE3, velocity in R^6.
		compute_mean(pose, velocity);
		covariance.setZero();

		// Accumulate outer products of each sigma point's deviation from
		// the mean; pose deviations are mapped into the Lie algebra of
		// SE3 via the log map.
		for (int i = 0; i < 25; i++) {
			Vector12 eps;

			eps.template head<6>() = SE3Type::log(
					pose.inverse() * sigma_pose[i]);
			eps.template tail<6>() = sigma_velocity[i] - velocity;

			covariance += eps * eps.transpose();
		}
		// Scale by the sigma-point covariance weight.
		covariance /= 2;
	}
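The same accumulate-then-scale pattern works for any set of sigma points; this example is only unusual in mapping pose deviations through the SE3 log. A plain-Euclidean sketch in C with hypothetical points in R^2:

#include <stdio.h>

#define NPTS 5
#define DIM  2

int main(void)
{
    /* Hypothetical sigma points in R^2 (the example above instead works
       on SE3 x R^6, taking pose deviations through the log map). */
    double pts[NPTS][DIM] = { {0, 0}, {1, 0}, {-1, 0}, {0, 2}, {0, -2} };
    double mean[DIM] = {0, 0};
    double cov[DIM][DIM] = {{0}};

    for (int i = 0; i < NPTS; i++)
        for (int d = 0; d < DIM; d++)
            mean[d] += pts[i][d] / NPTS;

    /* Accumulate outer products of the deviations, then apply the same
       fixed 1/2 weight the example uses. */
    for (int i = 0; i < NPTS; i++) {
        double eps[DIM];
        for (int d = 0; d < DIM; d++)
            eps[d] = pts[i][d] - mean[d];
        for (int r = 0; r < DIM; r++)
            for (int c = 0; c < DIM; c++)
                cov[r][c] += eps[r] * eps[c];
    }
    for (int r = 0; r < DIM; r++)
        for (int c = 0; c < DIM; c++)
            cov[r][c] /= 2;

    printf("cov = [[%g, %g], [%g, %g]]\n",
           cov[0][0], cov[0][1], cov[1][0], cov[1][1]);
    return 0;
}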
Example No. 5
/*
 * Assumes the data are Gaussian; rejects outliers that lie more than
 * nsigma standard deviations from the mean. Sets outlier values to NAN
 * in the input data array. Returns the number of rejected values.
 */
int reject_outliers(double data[], int size, float nsigma)
{
	double mean, sigma;
	int i, reject_count;

	mean = compute_mean(data, 0, size);
	sigma = compute_sigma(data, size, mean);
	reject_count = 0;
	for (i = 0; i < size; i++) {
		if (isnan(data[i])) continue;
		if (fabs(mean - data[i]) > nsigma * sigma) {
			data[i] = NAN;
			reject_count++;
		}
	}
	return reject_count;
}
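A usage sketch; compute_mean and compute_sigma are written out here as assumed NAN-aware helpers matching the signatures above:

#include <math.h>
#include <stdio.h>

int reject_outliers(double data[], int size, float nsigma); /* above */

/* Assumed helper: mean over [start, end), skipping NAN entries. */
double compute_mean(double data[], int start, int end)
{
    double sum = 0.0;
    int count = 0;
    for (int i = start; i < end; i++)
        if (!isnan(data[i])) { sum += data[i]; count++; }
    return count > 0 ? sum / count : NAN;
}

/* Assumed helper: standard deviation about a given mean, skipping NANs. */
double compute_sigma(double data[], int size, double mean)
{
    double var = 0.0;
    int count = 0;
    for (int i = 0; i < size; i++)
        if (!isnan(data[i])) {
            double d = data[i] - mean;
            var += d * d;
            count++;
        }
    return count > 0 ? sqrt(var / count) : NAN;
}

int main(void)
{
    /* One value (100.0) sits far outside the bulk of the data. */
    double data[] = {9.8, 10.1, 10.0, 9.9, 100.0, 10.2, 9.7};
    int n = sizeof data / sizeof data[0];

    printf("rejected %d value(s)\n", reject_outliers(data, n, 2.0f)); /* 1 */
    return 0;
}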
Example No. 6
/*
 *  Run EM repeatedly to fit normalization functions for each genotype.
 *
 * @param Contrast - contrast values for each SNP
 * @param Strength - strength values for each SNP
 * @param max_iterations - cap on EM iterations so the loop cannot run forever
 * @param log_lik_convergence - stop once the log-likelihood improvement drops below this
 * @param NP - output prediction functions for each genotype
 */
void EMContrast(const std::vector<double> &Contrast,
                const std::vector<double> &Strength,
                const int max_iterations,
                const double log_lik_convergence,
                NormalizationPredictor &NP)
{
	// this is the only routine that needs newmat
	// so I can isolate it!
	vector<double> mu;
	vector<double> sigma;
	vector<double> probs;
	vector<double> wAA,wAB,wBB;
	vector<double> mAA,mAB,mBB;
	vector<double> zAA,zAB,zBB;
	vector<double> lik_per_snp;

	double last_log_lik,log_lik_diff,curr_log_lik;
	unsigned int n_iterations,nSNP;

	InitializeEMVars(mu,sigma,probs,Contrast);
	NP.TargetCenter.resize(3);
	NP.TargetCenter[0]=-.66;
	NP.TargetCenter[1]=0;
	NP.TargetCenter[2]= .66;
	NP.CenterCovariates.resize(3);
	NP.CenterCovariates[0] = 0;
	NP.CenterCovariates[1] = 0;
	NP.CenterCovariates[2] = 0;

	nSNP = Contrast.size();
	mAA.resize(nSNP);
	mAB.resize(nSNP);
	mBB.resize(nSNP);
	wAA.resize(nSNP);
	wAB.resize(nSNP);
	wBB.resize(nSNP);
	zAA.resize(nSNP);
	zAB.resize(nSNP);
	zBB.resize(nSNP);
	lik_per_snp.resize(nSNP);

	FillVector(mBB,mu[0]);
	FillVector(mAB,mu[1]);
	FillVector(mAA,mu[2]);
	Compute_Weights(wAA,mAA,sigma[2],Contrast);
	Compute_Weights(wAB,mAB,sigma[1],Contrast);
	Compute_Weights(wBB,mBB,sigma[0],Contrast);

	last_log_lik = -1000000;
	log_lik_diff = log_lik_convergence+1;
	n_iterations = 0;

	while (log_lik_diff > log_lik_convergence && n_iterations < max_iterations){
		n_iterations += 1;
		// E step
		// update probs for each group
		// relative genotype probability per SNP
		// relative likelihood steps
		LikelihoodPerSNP(lik_per_snp,wAA,wAB,wBB,probs);
		GenotypePerSNP(zAA,wAA,lik_per_snp,probs[2]);
		GenotypePerSNP(zAB,wAB,lik_per_snp,probs[1]);
		GenotypePerSNP(zBB,wBB,lik_per_snp,probs[0]);
		probs[0] = compute_mean(zBB);
		probs[1] = compute_mean(zAB);
		probs[2] = compute_mean(zAA);
		curr_log_lik = ComputeLL(lik_per_snp);
		log_lik_diff = curr_log_lik-last_log_lik;
		last_log_lik = curr_log_lik;
		// done with E step

		// now do the big M step:
		// generate predictors for each genotype, weighted by the
		// responsibilities from the E step (the SVD of the design
		// matrix happens once, outside the loop):
		// fitAA = predictor weighted by zAA, mAA = its predictions per SNP
		// fitAB = predictor weighted by zAB, mAB = its predictions per SNP
		// fitBB = predictor weighted by zBB, mBB = its predictions per SNP
		// AB keeps the dummy fit; AA and BB use the real weighted cubic fit
		Dummy_Fit(Contrast,Strength,zAB,NP.fitAB,mAB);
		FitWeightedCubic(Contrast,Strength,zAA,NP.fitAA,mAA);
		FitWeightedCubic(Contrast,Strength,zBB,NP.fitBB,mBB);

		sigma[0] = WeightedSigma(Contrast,mBB,zBB);
		sigma[1] = WeightedSigma(Contrast,mAB,zAB);
		sigma[2] = WeightedSigma(Contrast,mAA,zAA);
		MinSigma(sigma, .001);
		Compute_Weights(wAA,mAA,sigma[2],Contrast);
		Compute_Weights(wAB,mAB,sigma[1],Contrast);
		Compute_Weights(wBB,mBB,sigma[0],Contrast);
		HardShell(Contrast,wAA,wBB);
	}
	// transfer useful data out of program
	// need: predictor vectors fAA, fAB, fBB
	// need: sigma
	NP.sigma.resize(3);
	NP.sigma[0]=sigma[0];
	NP.sigma[1]=sigma[1];
	NP.sigma[2]=sigma[2];
}
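Stripped of the genotype-specific regression, the loop above is a standard Gaussian-mixture EM: the E step computes per-SNP responsibilities and the log-likelihood, the M step re-estimates mixing probabilities, centers, and sigmas, and iteration stops on a likelihood-improvement threshold or an iteration cap. A self-contained 1-D sketch of that skeleton in C; the data and all names here are toy stand-ins (three components standing in for BB/AB/AA), and the real code fits weighted cubic predictors instead of plain means:

#include <math.h>
#include <stdio.h>

#define N 8
#define K 3

static const double SQRT_2PI = 2.5066282746310002;

/* Gaussian density: plays the role Compute_Weights plays above. */
static double gauss(double x, double mu, double sigma)
{
    double d = (x - mu) / sigma;
    return exp(-0.5 * d * d) / (sigma * SQRT_2PI);
}

int main(void)
{
    /* Toy contrast-like data near three centers. */
    double x[N] = {-0.7, -0.6, -0.05, 0.0, 0.05, 0.6, 0.65, 0.7};
    double mu[K] = {-0.66, 0.0, 0.66};          /* cf. NP.TargetCenter */
    double sigma[K] = {0.2, 0.2, 0.2};
    double prob[K] = {1.0 / 3, 1.0 / 3, 1.0 / 3};
    double z[K][N];
    double last_ll = -1000000;                  /* cf. last_log_lik */

    for (int iter = 0; iter < 100; iter++) {    /* cf. max_iterations */
        /* E step: responsibilities and total log-likelihood. */
        double ll = 0.0;
        for (int i = 0; i < N; i++) {
            double lik = 0.0;
            for (int k = 0; k < K; k++)
                lik += prob[k] * gauss(x[i], mu[k], sigma[k]);
            for (int k = 0; k < K; k++)
                z[k][i] = prob[k] * gauss(x[i], mu[k], sigma[k]) / lik;
            ll += log(lik);
        }
        if (ll - last_ll < 1e-8)                /* cf. log_lik_convergence */
            break;
        last_ll = ll;

        /* M step: re-estimate mixing probs, centers, and sigmas. */
        for (int k = 0; k < K; k++) {
            double zsum = 0.0, m = 0.0, v = 0.0;
            for (int i = 0; i < N; i++) {
                zsum += z[k][i];
                m += z[k][i] * x[i];
            }
            m /= zsum;
            for (int i = 0; i < N; i++)
                v += z[k][i] * (x[i] - m) * (x[i] - m);
            prob[k] = zsum / N;
            mu[k] = m;
            sigma[k] = sqrt(v / zsum);
            if (sigma[k] < 0.001)               /* cf. MinSigma */
                sigma[k] = 0.001;
        }
    }
    printf("centers: %.3f %.3f %.3f\n", mu[0], mu[1], mu[2]);
    return 0;
}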
Example No. 7
static void onlineClusterKNN_cluster(t_onlineClusterKNN *x, t_symbol *s, int argc, t_atom *argv)
{
    int i, j, k, instanceIdx, listLength;
    float min_dist,dist;
    
	instanceIdx = x->numInstances;
	listLength = argc;
	(void)s; // silence the 'unused parameter' warning
    
    if((x->featureLength>0) && (x->featureLength != listLength))
	{
        post("received list of length %i and expected %i", listLength, x->featureLength); 
        return;
    }    
    
	x->instances = (t_instance *)t_resizebytes_(x->instances, x->numInstances * sizeof(t_instance), (x->numInstances+1) * sizeof(t_instance));
	x->instanceFeatureLengths = (int *)t_resizebytes_(x->instanceFeatureLengths, x->numInstances * sizeof(int), (x->numInstances+1) * sizeof(int));
	
	x->instanceFeatureLengths[instanceIdx] = listLength;
	x->instances[instanceIdx].instance = (float *)t_getbytes_(listLength * sizeof(float));    
    
	x->numInstances++;
    
    
    // copy the incoming list into the new instance's feature vector
	for(i=0; i<listLength; i++)
		x->instances[instanceIdx].instance[i] = atom_getfloat(argv+i);
    
    // reject an all-zeros instance: roll back the allocation below
    if (test_zero(x->instances[instanceIdx].instance,listLength) == 0)
    {
        t_freebytes_(x->instances[instanceIdx].instance, x->instanceFeatureLengths[instanceIdx]*sizeof(float));
        x->instances = (t_instance *)t_resizebytes_(x->instances, x->numInstances * sizeof(t_instance), (x->numInstances-1) * sizeof(t_instance));
        x->instanceFeatureLengths = (int *)t_resizebytes_(x->instanceFeatureLengths, x->numInstances * sizeof(int), (x->numInstances-1) * sizeof(int)); 
        
        x->numInstances--;
        
    }
    else 
    {
        
        if(instanceIdx == 0)
        {
            x->featureInput = (float *)t_resizebytes_(x->featureInput, x->featureLength * sizeof(float), listLength * sizeof(float));		
            x->featureLength = listLength;  
            
            x->num = (int *)t_resizebytes_(x->num, 0 * sizeof(int), x->numClusters * sizeof(int));
            
            // initialize means randomly for each cluster        
            for(i=0; i<x->numClusters; i++)
            {
                x->means = (t_instance *)t_resizebytes_(x->means, i * sizeof(t_instance), (i+1) * sizeof(t_instance));
                x->means[i].instance = (float *)t_getbytes_(listLength * sizeof(float)); 
                
                if (x->randomFlag == 1)
                {
                    for(j=0; j<listLength; j++)
                    {
                        // note: reseeding per element makes this "random"
                        // initialization deterministic from run to run
                        srand(i*j+i+j+1);
                        x->means[i].instance[j] = (float)rand()/(float)RAND_MAX;
                    }
                }
            }                
            // initialize number of instances for each cluster
            for(i=0; i<x->numClusters; i++)
                x->num[i] = 0;
        }
        
        
        // scale values greater than 1 into [0,1) by shifting the decimal
        // point left by the number of integer digits
        for(i=0; i<listLength; i++)
            if (x->instances[instanceIdx].instance[i]>1)
            {            
                x->instances[instanceIdx].instance[i] = x->instances[instanceIdx].instance[i] * pow(10,-countDigits((int)(x->instances[instanceIdx].instance[i])));
            }
        
        //////////////ONLINE CLUSTERING
        
        //initialize means with the first instances if random==0
        if ((x->randomFlag == 0) && (instanceIdx < x->numClusters))
        {
            for(j=0; j<listLength; j++)
            {
                x->means[instanceIdx].instance[j] =  x->instances[instanceIdx].instance[j];
            }
            x->instances[instanceIdx].cluster = instanceIdx;
            x->num[instanceIdx] = 1;
        }
        else
        {        
            //compute distances to the means and determine the closest cluster 
            min_dist = 99999999;
            k = -1;
            for(i=0; i<x->numClusters; i++)
            { 
                dist = squared_euclid(x->means[i].instance,x->instances[instanceIdx].instance,listLength);
                if (dist < min_dist)
                {
                    min_dist = dist;
                    k = i;
                }
            }   
            
            // add the new instance to the closest cluster and update its mean
            if (k != -1)
            {
                x->num[k] = x->num[k] + 1;
                compute_mean(x, k, x->instances[instanceIdx]);
                x->instances[instanceIdx].cluster = k;     
            }  
        }
        
        outlet_int(x->id, x->instances[instanceIdx].cluster);    
    }
}
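compute_mean(x, k, ...) is not shown, but for online clustering it is presumably the classic incremental update: once a cluster has absorbed n points, folding in a new point x moves the mean by (x - mean)/n. A minimal sketch of that update (all names here are hypothetical):

#include <stdio.h>

/* Incremental mean: after the cluster's count has been bumped to n,
   fold the new point into its running mean in place. */
static void update_mean(float *mean, const float *x, int dim, int n)
{
    for (int d = 0; d < dim; d++)
        mean[d] += (x[d] - mean[d]) / (float) n;
}

int main(void)
{
    float mean[2] = {0.0f, 0.0f};
    float pts[3][2] = {{1, 1}, {2, 2}, {3, 3}};

    for (int n = 1; n <= 3; n++)
        update_mean(mean, pts[n - 1], 2, n);

    printf("mean = (%g, %g)\n", mean[0], mean[1]); /* (2, 2) */
    return 0;
}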
Example No. 8
double compute_clean_mean(double data[], int size, float nsigma)
{
	// Repeat outlier rejection until a pass removes nothing; compute_mean
	// must skip the NAN entries that rejection leaves behind.
	while (reject_outliers(data, size, nsigma) > 0)
		;
	return compute_mean(data, 0, size);
}