Example #1
typename TypeTraits<TVector>::LargerComponentType Correlation(const TVector& v1, const TVector& v2)
{
  // http://docs.opencv.org/doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.html
  // d(H_1, H_2) = \frac{\sum_i(H_1(i) - \bar{H_1})(H_2(i)-\bar{H_2})}{sqrt(var(H_1)var(H_2))}

  assert(Helpers::length(v1) > 0);
  assert(Helpers::length(v1) == Helpers::length(v2));

  float numerator = 0.0f;

  float meanV1 = Average(v1);
  float meanV2 = Average(v2);

  float varianceV1 = Variance(v1);
  float varianceV2 = Variance(v2);

  for(unsigned int i = 0; i < Helpers::length(v1); ++i)
  {
    numerator += (v1[i] - meanV1)*(v2[i] - meanV2);
  }

  float correlation = numerator/(sqrt(varianceV1 * varianceV2));

  return correlation;
}
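For comparison, here is a self-contained version of the same computation on plain std::vector<float>. AverageOf, VarianceOf, and CorrelationOf are local stand-ins written only for this sketch; they are not the project's Average/Variance/Helpers utilities referenced above:

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Stand-in for Average(): arithmetic mean of the components.
static float AverageOf(const std::vector<float>& v)
{
  float sum = 0.0f;
  for (float x : v) sum += x;
  return sum / v.size();
}

// Stand-in for Variance(): mean squared deviation from the mean.
static float VarianceOf(const std::vector<float>& v)
{
  const float mean = AverageOf(v);
  float sum = 0.0f;
  for (float x : v) sum += (x - mean) * (x - mean);
  return sum / v.size();
}

// Same structure as Correlation() above, on plain vectors.
float CorrelationOf(const std::vector<float>& v1, const std::vector<float>& v2)
{
  assert(!v1.empty());
  assert(v1.size() == v2.size());
  const float m1 = AverageOf(v1);
  const float m2 = AverageOf(v2);
  float numerator = 0.0f;
  for (std::size_t i = 0; i < v1.size(); ++i)
    numerator += (v1[i] - m1) * (v2[i] - m2);
  return numerator / std::sqrt(VarianceOf(v1) * VarianceOf(v2));
}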
/**
*   Helper function meant to make training a network easier. It uses predefined stopping
* criteria: the goal is to halt training once the network's mean squared error over the
* samples stops decreasing. It receives a parameter giving a minimum number of training
* iterations, after which the variation of the mean squared error starts being checked.
* It also receives the number of training iterations to run before each new error
* measurement. When the (relative) variance of the last n measurements is less than or
* equal to a given threshold (between 0 and 1), training stops.
*   The function also receives the sample set (input matrix/output matrix), the number of
* samples contained in those matrices, the dimension of each input and output sample, and
* a flag indicating whether the samples are trained in random order or sequentially.
*/
int BKPNeuralNet::AutoTrain( float**inMatrix, float **outMatrix, int inSize, int outSize, int nSamples, 
              int minTrains, int varVectorSize, float minStdDev, int numTrains, TrainType type, 
              float l_rate, float momentum, int* retExecutedTrains )
{
  // Early-return cases:
  if( (!inMatrix) || (!outMatrix) || (inSize!=_nLayers[0]) || (_nLayers[_layers-1]!=outSize) )
    return -1;

  // The initial training count must be at least 0:
  if( *retExecutedTrains < 0 )
    *retExecutedTrains = 0;

  int thisSample = -1;    //< Auxiliary variable: index of the sample to be trained.
  // Run the mandatory training iterations:
  for( int i=0 ; i<minTrains ; i++ )
  {
    if( type == ORDERED_TRAIN )
      thisSample = (thisSample + 1) % nSamples;
    if( type == RANDOM_TRAIN )
      thisSample = RandInt(0, (nSamples-1));
    Train( inSize, inMatrix[thisSample], outSize, outMatrix[thisSample], l_rate, momentum );
  }

  // Run the remaining training iterations:
  float* varVector = new float[varVectorSize];  //< Buffer holding the most recent error measurements.
  int ptVarVector = 0;              //< Points to the first empty position of varVector.
  float lastVariance = (float)MAX_VALUE;   //< Holds the current variance value.
  float StdDev = (float)MAX_VALUE;   //< Holds the current standard deviation.
  thisSample = -1;
  int nTrains=minTrains + *retExecutedTrains;  //< Tracks the number of training iterations executed.
  bool varFlag = false;
  while( StdDev > minStdDev )
  {
    if( type == ORDERED_TRAIN )
      thisSample = (thisSample + 1) % nSamples;
    if( type == RANDOM_TRAIN )
      thisSample = RandInt(0, (nSamples-1));
    Train( inSize, inMatrix[thisSample], outSize, outMatrix[thisSample], l_rate, momentum );
    if( (nTrains%numTrains) == 0 ) //< Every numTrains trainings, measure the error:
    {
      float retRMS_Error = 0;
      float mean = 0;
      RMS_error( inMatrix, outMatrix, inSize, outSize, nSamples, &retRMS_Error );
      varFlag = ShiftLeft( varVector, varVectorSize, retRMS_Error, ptVarVector );
      if( varFlag == true )
      {
        lastVariance = Variance( varVector, varVectorSize, &mean );
        StdDev = ((float)sqrt(lastVariance))/mean;
      }
      ptVarVector++;
    }
    nTrains++;
    if( nTrains >= 90000 )   //< Hard cap: stop after 90000 training iterations.
      StdDev = minStdDev;

  }
  *retExecutedTrains = nTrains;
  delete[] varVector;
  return 0;
}
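A hedged usage sketch of AutoTrain based on the parameter description above; the net object, the inputs/outputs buffers, the 4-input/1-output topology, and all numeric thresholds are hypothetical:

// Assumes a BKPNeuralNet 'net' whose first layer has 4 neurons and last layer 1 neuron,
// and float** buffers 'inputs'/'outputs' holding 100 samples of matching dimensions.
int executed = 0;   // must start >= 0; receives the total number of trainings executed
int status = net.AutoTrain(inputs, outputs,
                           4, 1, 100,      // inSize, outSize, nSamples
                           5000,           // minTrains: mandatory trainings before checking the error
                           10,             // varVectorSize: number of error measurements kept
                           0.01f,          // minStdDev: stop when the relative std-dev <= 1%
                           200,            // numTrains: trainings between error measurements
                           RANDOM_TRAIN,   // or ORDERED_TRAIN
                           0.3f, 0.7f,     // learning rate, momentum
                           &executed);
if (status == -1)
{
  // Null buffers, or sample dimensions not matching the network topology.
}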
double ChiRand::Skewness() const
{
    double mu = Mean();
    double sigmaSq = Variance();
    double skew = mu * (1 - 2 * sigmaSq);
    skew /= std::pow(sigmaSq, 1.5);
    return skew;
}
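For reference, the identity this snippet encodes: the skewness of the chi distribution written in terms of its own mean \mu and variance \sigma^2,

\gamma_1 = \frac{\mu\,(1 - 2\sigma^2)}{\sigma^3}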
Example #4
static void chanfunc_CalcStatistics (channelPtr chan)
{
    double mean, std_dev, variance, rms, moment, median, mode, min, max;
    int err, order, min_i, max_i, intervals;
    char newnote[256];

    Fmt (chanfunc.note, "");
    err = MaxMin1D (chan->readings, chan->pts, &max, &max_i, &min, &min_i);
    SetInputMode (chanfunc.p, STATISTICS_MIN, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_MIN, min);
    SetInputMode (chanfunc.p, STATISTICS_MAX, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_MAX, max);
    if (err == NoErr)
    {
        Fmt (chanfunc.note, "%s<Min: %f[e2p5]\n", min);
        Fmt (chanfunc.note, "%s[a]<Max: %f[e2p5]\n", max);
    }

    err = Mean (chan->readings, chan->pts, &mean);
    SetInputMode (chanfunc.p, STATISTICS_MEAN, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_MEAN, mean);
    if (err == NoErr) Fmt (chanfunc.note, "%s[a]<Mean: %f[e2p5]\n", mean);

    err = StdDev (chan->readings, chan->pts, &mean, &std_dev);
    SetInputMode (chanfunc.p, STATISTICS_STDDEV, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_STDDEV, std_dev);
    if (err == NoErr) Fmt (chanfunc.note, "%s[a]<StdDev: %f[e2p5]\n", std_dev);

    err = Variance (chan->readings, chan->pts, &mean, &variance);
    SetInputMode (chanfunc.p, STATISTICS_VAR, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_VAR, variance);
    if (err == NoErr) Fmt (chanfunc.note, "%s[a]<Variance: %f[e2p5]\n", variance);

    err = RMS (chan->readings, chan->pts, &rms);
    SetInputMode (chanfunc.p, STATISTICS_RMS, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_RMS, rms);
    if (err == NoErr) Fmt (chanfunc.note, "%s[a]<RMS: %f[e2p5]\n", rms);

    GetCtrlVal (chanfunc.p, STATISTICS_ORDER, &order);
    err = Moment (chan->readings, chan->pts, order, &moment);
    SetInputMode (chanfunc.p, STATISTICS_MOMENT, !err);
    SetInputMode (chanfunc.p, STATISTICS_ORDER, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_MOMENT, moment);
    if (err == NoErr) Fmt (chanfunc.note, "%s[a]<Moment: %f[e2p5] (order: %i)\n", moment, order);

    err = Median (chan->readings, chan->pts, &median);
    SetInputMode (chanfunc.p, STATISTICS_MEDIAN, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_MEDIAN, median);
    if (err == NoErr) Fmt (chanfunc.note, "%s[a]<Median: %f[e2p5]\n", median);

    GetCtrlVal (chanfunc.p, STATISTICS_INTERVAL, &intervals);
    err = Mode (chan->readings, chan->pts, min, max, intervals, &mode);
    SetInputMode (chanfunc.p, STATISTICS_INTERVAL, !err);
    SetInputMode (chanfunc.p, STATISTICS_MODE, !err);
    SetCtrlVal (chanfunc.p, STATISTICS_INTERVAL, intervals);
    SetCtrlVal (chanfunc.p, STATISTICS_MODE, mode);
    if (err == NoErr) Fmt (chanfunc.note, "%s[a]<Mode: %f[e2p5] (intervals: %i)\n", mode, intervals);
}
double NakagamiDistribution::Skewness() const
{
    double thirdMoment = lgammaShapeRatio;
    thirdMoment -= 1.5 * Y.GetLogRate();
    thirdMoment = (m + 0.5) * std::exp(thirdMoment);
    double mean = Mean();
    double variance = Variance();
    return (thirdMoment - mean * (3 * variance + mean * mean)) / std::pow(variance, 1.5);
}
Example #6
#include <cstdio>
#include <vector>

// PartialSum, SquarePartialSum, and Variance are assumed to be declared elsewhere.
int main()
{
  std::vector<int> a = {1,2,3,4,5,6,7};

  std::vector<int> psum = PartialSum(a);
  printf("%f\n", Variance(SquarePartialSum(a), psum, 2, 4));

  return 0;
}
double ChiRand::ExcessKurtosis() const
{
    double mu = Mean();
    double sigmaSq = Variance();
    double sigma = std::sqrt(sigmaSq);
    double skew = Skewness();
    double kurt = 1.0 - mu * sigma * skew;
    kurt /= sigmaSq;
    --kurt;
    return 2 * kurt;
}
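This mirrors the chi-distribution excess kurtosis expressed through the mean, variance, and skewness computed just above:

\gamma_2 = \frac{2\,\bigl(1 - \mu\,\sigma\,\gamma_1 - \sigma^2\bigr)}{\sigma^2}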
double UnivariateDistribution<T>::ThirdMoment() const
{
    double mean = Mean();
    double variance = Variance();
    double skewness = Skewness();

    double moment = skewness * std::sqrt(variance) * variance;
    moment += mean * mean * mean;
    moment += 3 * mean * variance;
    return moment;
}
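The raw-moment identity implemented above, with \gamma_1 the skewness returned by Skewness():

E[X^3] = \gamma_1\,\sigma^3 + 3\,\mu\,\sigma^2 + \mu^3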
Example #9
void ParallelPlane::UpdateText(){

    float var = Variance();

    QString var_text = (var<0)?"NA":QString("Variance:%1").arg(var);
    text_->setPosition(osg::Vec3(0.02,0.1,0.01f));
    QString txt = QString("XAxis: %1\nYAxis: %2\n%3").arg(db_->get_header(axes_[0])).arg(db_->get_header(axes_[1])).arg(var_text);
    printf("Text: %s\n",txt.toStdString().c_str());
    text_->setText(txt.toStdString());

}
void RunningStats::Print(FILE * pFile, const char * header) const
    {
    fprintf (pFile, "\n%s\n", header);
    fprintf (pFile, "NumDataValues:     %ld\n", NumDataValues());
    fprintf (pFile, "Mean:              %f\n", Mean());
    fprintf (pFile, "Variance:          %f\n", Variance());
    fprintf (pFile, "StandardDeviation: %f\n", StandardDeviation());
    fprintf (pFile, "Skewness:          %f\n", Skewness());
    fprintf (pFile, "Kurtosis:          %f\n", Kurtosis());
    fprintf (pFile, "Maximum:           %f\n", Maximum());
    fprintf (pFile, "Minimum:           %f\n", Minimum());
    return;
    }
double UnivariateDistribution<T>::FourthMoment() const
{
    double mean = Mean();
    double variance = Variance();
    double moment3 = ThirdMoment();
    double kurtosis = Kurtosis();
    double meanSq = mean * mean;

    double moment = kurtosis * variance * variance;
    moment -= 6 * meanSq * variance;
    moment -= 3 * meanSq * meanSq;
    moment += 4 * mean * moment3;
    return moment;
}
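Likewise, the fourth raw moment in terms of the (non-excess) kurtosis \kappa returned by Kurtosis() and the third raw moment m_3:

E[X^4] = \kappa\,\sigma^4 + 4\,\mu\,m_3 - 6\,\mu^2\sigma^2 - 3\,\mu^4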
double UnivariateDistribution<T>::Skewness() const
{
    double var = Variance();
    if (!std::isfinite(var))
        return NAN;
    double mu = Mean(); /// var is finite, so is mu

    double sum = ExpectedValue([this, mu] (double x)
    {
        double xmmu = x - mu;
        double skewness = xmmu * xmmu * xmmu;
        return skewness;
    }, this->MinValue(), this->MaxValue());

    return sum / std::pow(var, 1.5);
}
double UnivariateDistribution<T>::ExcessKurtosis() const
{
    double var = Variance();
    if (!std::isfinite(var))
        return NAN;
    double mu = Mean(); /// var is finite, so is mu

    double sum = ExpectedValue([this, mu] (double x)
    {
        double xmmu = x - mu;
        double kurtosisSqrt = xmmu * xmmu;
        double kurtosis = kurtosisSqrt * kurtosisSqrt;
        return kurtosis;
    }, this->MinValue(), this->MaxValue());

    return sum / (var * var) - 3;
}
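Both expectation-based overloads follow directly from the central-moment definitions:

\gamma_1 = \frac{E[(X-\mu)^3]}{\sigma^3}, \qquad \gamma_2 = \frac{E[(X-\mu)^4]}{\sigma^4} - 3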
Example #14
 /// <summary>
 ///     Computes the standard deviation for a given history.
 /// </summary>
 /// <returns>
 ///     The standard deviation.
 /// </returns>
 double HillClimbing::MeasuredHistory::StandardDeviation()
 {
     return sqrt(Variance());
 }
double RunningStats::StandardDeviation() const
{
    return sqrt( Variance() );
}
void
JPlotLinearFit::LinearLSQ1()
{
	const JPlotDataBase* data = GetDataToFit();
	J2DDataPoint point;
	const JSize count = data->GetElementCount();
	JArray<JFloat> weight;
	JArray<JFloat> sigma;
	JSize i;

	JFloat vx, vy;
	Variance(&vx,&vy);
	JFloat resize = vy/vx;
	JFloat num = 0;
	JFloat avgx = 0;

	for (i = 1; i <= count; i++)
		{
		if (GetDataElement(i, &point))
			{
			JFloat sy = point.yerr;
			JFloat sx = point.xerr;
			if (sy == 0)
				{
				sy = 1;
				}
			JFloat s = 0;
			if (!itsXIsLog)
				{
				if (!itsYIsLog)
					{
					s = sqrt(sy * sy + resize * resize * sx * sx);
					}
				else
					{
					s = sqrt(sy * sy + resize * resize * point.y * point.y * sx * sx);
					}
				}
			else
				{
				// do for power law;
				}
			sigma.AppendElement(s);
			JFloat w = 1/(s*s);
			weight.AppendElement(w);
			num += w;
			avgx += w * point.x;
			itsRealCount++;
			}
		}

	avgx /= num;

	JArray<JFloat> t;
	JFloat stt = 0;
	JFloat b = 0;
	JSize counter = 1;
	for (i = 1; i <= count; i++)
		{
		if (GetDataElement(i, &point))
			{
			JFloat tTemp = (point.x - avgx)/(sigma.GetElement(counter));
			t.AppendElement(tTemp);
			stt += 	tTemp * tTemp;
			b += tTemp * point.y / sigma.GetElement(counter);
			counter++;
			}
		}
	b /= stt;

	JFloat a = 0;
	JFloat aerr = 0;
	counter = 1;
	for (i = 1; i <= count; i++)
		{
		if (GetDataElement(i, &point))
			{
			JFloat w = weight.GetElement(counter);
			a += w * (point.y - b * point.x);
			aerr += w * w * point.x * point.x;
			counter++;
			}
		}
	a /= num;
	aerr /= (num * stt);
	aerr += 1;
	aerr /= num;
	aerr = sqrt(aerr);

	JFloat berr = sqrt(1.0/stt);

	JFloat c = 0;
	JBoolean sytest = kJTrue;
	counter = 1;
	for (i = 1; i <= count; i++)
		{
		if (GetDataElement(i, &point))
			{
			JFloat temp = (point.y - a - b * point.x)/(sigma.GetElement(counter));
			c += temp * temp;
			if (sigma.GetElement(counter) != 1)
				{
				sytest = kJFalse;
				}
			counter++;
			}
		}

	if (sytest)
		{
		JFloat sig = sqrt(c/count);
		aerr = aerr * sig;
		berr = berr * sig;
		}

	itsAParameter = a;
	itsAErrParameter = aerr;
	itsBParameter = b;
	itsBErrParameter = berr;
	itsChi2 = c;
}
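For orientation, the error-weighted straight-line fit y \approx a + b\,x that this routine appears to follow, with \sigma_i the effective per-point error combining the x and y uncertainties:

S = \sum_i \frac{1}{\sigma_i^2}, \qquad
\bar{x}_w = \frac{1}{S}\sum_i \frac{x_i}{\sigma_i^2}, \qquad
t_i = \frac{x_i - \bar{x}_w}{\sigma_i}, \qquad
S_{tt} = \sum_i t_i^2

b = \frac{1}{S_{tt}}\sum_i \frac{t_i\,y_i}{\sigma_i}, \qquad
a = \frac{1}{S}\sum_i \frac{y_i - b\,x_i}{\sigma_i^2}, \qquad
\sigma_b = \sqrt{\frac{1}{S_{tt}}}, \qquad
\chi^2 = \sum_i \left(\frac{y_i - a - b\,x_i}{\sigma_i}\right)^2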
 double StandardDeviation() const
 {
     return sqrt( Variance() );
 }
double UnivariateDistribution<T>::SecondMoment() const
{
    double mean = Mean();
    return mean * mean + Variance();
}
Example #19
double DataSource::StdDev(Getdatafun getdata, double avg) {
	return sqrt(Variance(getdata, avg));
}
Example #20
void System::runMonteCarlo()
{
    int i, j, k, NOA;
    int alpha_max = Alpha.n_elem;
    int beta_max = Beta.n_elem;
    double I, I2, dx; // total energy integral
    double T, T2, dt; // kinetic energy integral
    double V, V2, dv; // potential energy integral
    bool Accepted;

    for (i=0; i<alpha_max; i++)    // LOOP OVER ALPHA VALUES
    {
        Wavefunction->setAlpha(i);
        TypeHamiltonian->getWavefunction()->setAlpha(i);

        for (j=0; j<beta_max; j++)    // LOOP OVER BETA VALUES
        {
            Wavefunction->setBeta(j);
            TypeHamiltonian->getWavefunction()->setBeta(j);
            dx = I = I2 = NOA = 0;
            dt = T = T2 = 0;
            dv = V = V2 = 0;

            // BRUTE FORCE METROPOLIS:
            for (k=0; k<NumberOfCycles; k++)
            {
                Accepted = newStepMetropolis(); // NEW STEP: ACCEPTED OR REFUSED
                if (Accepted)
                {
                    // LOCAL ENERGY
                    dx = TypeHamiltonian->evaluateLocalEnergy(OldPosition);
                    I += dx;
                    I2 += dx*dx;

                    // LOCAL KINETIC ENERGY
                    dt = TypeHamiltonian->getKineticEnergy();
                    T += dt;
                    T2 += dt*dt;

                    // LOCAL POTENTIAL ENERGY
                    dv = TypeHamiltonian->getPotentialEnergy();
                    V += dv;
                    V2 += dv*dv;

                    NOA++;
                }
                else
                {
                    I += dx;
                    I2 += dx*dx;

                    T += dt;
                    T2 += dt*dt;

                    V += dv;
                    V2 += dv*dv;
                }

            }

            NumberOfAcceptedSteps(i,j) = NOA;
            Energy(i,j) = I/double(NumberOfCycles);
            EnergySquared(i,j) = I2/double(NumberOfCycles);
            Variance(i,j) = (EnergySquared(i,j) - Energy(i,j)*Energy(i,j));
            KineticEnergy(i,j) = T/double(NumberOfCycles);
            KineticEnergySquared(i,j) = T2/double(NumberOfCycles);
            PotentialEnergy(i,j) = V/double(NumberOfCycles);
            PotentialEnergySquared(i,j) = V2/double(NumberOfCycles);
        }
    }
}
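The per-(\alpha,\beta) accumulators above give the usual Monte Carlo estimates over the N = NumberOfCycles sweeps, with the stored Variance being the sample variance of the local energy:

\langle E \rangle = \frac{1}{N}\sum_{k=1}^{N} E_L^{(k)}, \qquad
\sigma_E^2 = \langle E_L^2 \rangle - \langle E_L \rangle^2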
Example #21
void System::importanceSampling()
{
    int NOA;            // Number Of Accepted steps
    double I, I2, dx;   // total energy integral
    double T, T2, dt;   // kinetic energy integral
    double V, V2, dv;   // potential energy integral
    int a,b,c,i,j;      // loop variables: a->alpha, b->beta, c->cycles, i->particle, j-> dimension
    int amax = Alpha.n_elem;    // number of total alpha values
    int bmax = Beta.n_elem;     // number of total beta values
    double wf_new, wf_old;
    double greensfunction;
    double D = 0.5;     // diffusion constant


    for (a=0; a<amax; a++)    // LOOP OVER ALPHA VALUES
    {
        Wavefunction->setAlpha(a);
        TypeHamiltonian->getWavefunction()->setAlpha(a);

        for (b=0; b<bmax; b++)    // LOOP OVER BETA VALUES
        {
            Wavefunction->setBeta(b);
            TypeHamiltonian->getWavefunction()->setBeta(b);

            dx = I = I2 = NOA = 0;
            dt = T = T2 = 0;
            dv = V = V2 = 0;

            // Evaluate the starting wavefunction and quantum force so that wf_old and
            // QuantumForceOld are valid for the first Metropolis-Hastings test below:
            wf_old = Wavefunction->evaluateWavefunction(OldPosition);
            quantumForce(OldPosition, QuantumForceOld, wf_old);

            // IMPORTANCE SAMPLING:
            for (c=0; c<NumberOfCycles; c++)
            {
                dx = 0;
                for (i=0; i<NumberOfParticles; i++)
                {
                    // Taking a new, random step, moving one particle only:
                    NewPosition = OldPosition;
                    NewPosition.row(i) = OldPosition.row(i)+Rnd->nextGauss(0,sqrt(StepLength))+QuantumForceOld.row(i)*StepLength*D;

                    wf_new = Wavefunction->evaluateWavefunction(NewPosition);
                    quantumForce(NewPosition,QuantumForceNew,wf_new);

                    // Metropolis-Hastings algorithm:
                    greensfunction = 0.0;
                    for(j=0;j<NumberOfDimensions;j++)
                    {
                        greensfunction += 0.5*(QuantumForceOld(i,j) + QuantumForceNew(i,j))*(D*StepLength*0.5*(QuantumForceOld(i,j)-QuantumForceNew(i,j)) - NewPosition(i,j) + OldPosition(i,j));
                    }
                    greensfunction = exp(greensfunction);

                    // Metropolis test:
                    if (ran0(&RandomSeed) <= greensfunction*wf_new*wf_new/(wf_old*wf_old))
                    {
                        OldPosition.row(i) = NewPosition.row(i);
                        QuantumForceOld.row(i) = QuantumForceNew.row(i);
                        Wavefunction->setOldWavefunction(wf_new);
                        wf_old = wf_new;
                    }
                }
                // Updating integral:

                // LOCAL ENERGY
                dx = TypeHamiltonian->evaluateLocalEnergy(OldPosition);
                I += dx;
                I2 += dx*dx;

                // LOCAL KINETIC ENERGY
                dt = TypeHamiltonian->getKineticEnergy();
                T += dt;
                T2 += dt*dt;

                // LOCAL POTENTIAL ENERGY
                dv = TypeHamiltonian->getPotentialEnergy();
                V += dv;
                V2 += dv*dv;

                NOA++;
            }
            NumberOfAcceptedSteps(a,b) = NOA;
            Energy(a,b) = I/double(NumberOfCycles);
            EnergySquared(a,b) = I2/double(NumberOfCycles);
            Variance(a,b) = (EnergySquared(a,b) - Energy(a,b)*Energy(a,b));
            KineticEnergy(a,b) = T/double(NumberOfCycles);
            KineticEnergySquared(a,b) = T2/double(NumberOfCycles);
            PotentialEnergy(a,b) = V/double(NumberOfCycles);
            PotentialEnergySquared(a,b) = V2/double(NumberOfCycles);
            //AvgDistance = 0;
        }
    }
}
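The greensfunction sum accumulated above is the log-ratio of the Langevin (Fokker-Planck) transition kernels, and the subsequent test is the Metropolis-Hastings acceptance, with the drift F given by the quantum force:

\ln\frac{G(x_{old} \mid x_{new})}{G(x_{new} \mid x_{old})}
  = \sum_j \tfrac{1}{2}\bigl(F_{old,j} + F_{new,j}\bigr)
    \Bigl(\tfrac{1}{2} D\,\Delta t\,(F_{old,j} - F_{new,j}) - x_{new,j} + x_{old,j}\Bigr)

q = \frac{G(x_{old} \mid x_{new})\,|\Psi(x_{new})|^2}{G(x_{new} \mid x_{old})\,|\Psi(x_{old})|^2},
\qquad \text{accept if } u \le q,\ u \sim U(0,1)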
double CAfficheMesPol::Correlation(int par1,int par2)
{
	return (Covariance(par1,par2)/(pow(Variance(par1)*Variance(par2),0.5)));
}
Example #23
double DataSource::StdDev(Getdatafun getdata, double avg) {
	double var = Variance(getdata, avg);
	return IsNull(var) ? Null : sqrt(var);
}
/*  Computes the standard deviation of a vector of double values.
    Parameter 1: double array.
    Parameter 2: array length. */
double StandardDeviation(double *vec, int length)
{
    return sqrt(Variance(vec, length));
}
Example #25
 /// <summary>
 ///     Computes the mean of variances for a given history.
 /// </summary>
 /// <returns>
 ///     The mean of variances.
 /// </returns>
 double HillClimbing::MeasuredHistory::VarianceMean()
 {
     return Variance() / Count();
 }