 ///\param lTime The sampler iteration.
 ///\param pFrom The particle to move.
 ///\param pRng  A random number generator.
 void fMove(long lTime, smc::particle<State> & pFrom, smc::rng *pRng)
 {
   State *to = pFrom.GetValuePointer();
   int lineFieldAtPrevTime = to->lineField[0];
   // the probability of each value of lineField[0] is proportional to exp(-beta * KroneckerDelta(lineField[0] + lineField[1], 2))
   double normalizationConstant, probOfNoEdge;
   if(lineFieldAtPrevTime == 0){
     // no edge at the previous time: both values of lineField[0] carry weight exp(0) = 1
     normalizationConstant = 1 + 1;
   } else {
     // edge at the previous time: keeping the edge (lineField[0] + lineField[1] == 2) is penalised by exp(-beta)
     normalizationConstant = 1 + exp(-to->beta);
   }
   probOfNoEdge = 1 / normalizationConstant;
   // transition
   to->lineField[1] = to->lineField[0];
   if(pRng->UniformS() < probOfNoEdge){
     to->lineField[0] = 0;
   } else {
     to->lineField[0] = 1;
   }
   to->alpha += pRng->Normal(0, alpha0 / 100.0);
   to->beta += pRng->Normal(0, beta0 / 100.0);
   to->gamma += pRng->Normal(0, gamma0 / 100.0);
   pFrom.AddToLogWeight(logLikelihood(lTime, *to));
 }  
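For reference, the transition probability implied by the KroneckerDelta comment in fMove above can be written out as follows (a sketch of the assumed math, with l_0' the newly drawn lineField[0] and l_1 the previous line-field value):

P(l_0' = v \mid l_1) \propto \exp\{-\beta\,\delta(v + l_1,\,2)\}, \qquad
P(l_0' = 0 \mid l_1 = 0) = \tfrac{1}{2}, \qquad
P(l_0' = 0 \mid l_1 = 1) = \frac{1}{1 + e^{-\beta}},

which is exactly the probOfNoEdge computed in the two branches of the if/else.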
Example #2
    /// \param pRng A pointer to the random number generator which is to be used
    smc::particle<double> fInitialise(smc::rng *pRng) {
        double x;
  
        x = pRng->Normal(0,std_x0);

        return smc::particle<double>(x,logLikelihood(0,x));
    }
/**************************************************************************************************
* Assign a terrain type to a polygon based on likelihoods
**************************************************************************************************/
void TerrainAnalyzer::assignType(SmurfPolygon* terrain) {
	// pick the most likely terrain type using the logLikelihood() method
	int finalType = logLikelihood();

	// set the terrain type
	switch(finalType) {
	case 0:
		terrain->setType(SNOW);
		break;
	case 1:
		terrain->setType(ROCK);
		break;
	case 2:
		terrain->setType(ICE);
		break;
	case 3:
		terrain->setType(GRASS);
		break;
	case 4:
		terrain->setType(SAND);
		break;
	case 5:
		terrain->setType(WATER);
		break;
	}
}
Example #4
void CLinearMapping::display(ostream& os) const
{
  os << "Linear Mapping:" << endl;
  os << "Optimiser: " << getDefaultOptimiserStr() << endl;
  os << "Data Set Size: " << getNumData() << endl;
  os << "Log likelihood: " << logLikelihood() << endl;
}
Example #5
void CMlpMapping::display(ostream& os) const
{
  os << "Multi-Layer Perceptron Model:" << endl;
  os << "Optimiser: " << getDefaultOptimiserStr() << endl;
  os << "Data Set Size: " << getNumData() << endl;
  os << "Number hidden: " << hiddenDim << endl;
  os << "Log likelihood: " << logLikelihood() << endl;
}
Example #6
    ///\param lTime The sampler iteration.
    ///\param value The value of the particle being moved
    ///\param logweight The log weight of the particle being moved
    /// \param param Additional algorithm parameters
    void fMove(long lTime, cv_state & value, double & logweight, smc::nullParams & param)
    {
        value.x_pos += value.x_vel * Delta + R::rnorm(0.0,sqrt(var_s));
        value.x_vel += R::rnorm(0.0,sqrt(var_u));
        value.y_pos += value.y_vel * Delta + R::rnorm(0.0,sqrt(var_s));
        value.y_vel += R::rnorm(0.0,sqrt(var_u));

        logweight += logLikelihood(lTime, value);
    }
Example #7
    ///\param lTime The sampler iteration.
    ///\param pFrom The particle to move.
    ///\param pRng  A random number generator.
    void fMove(long lTime, smc::particle<double> & pFrom, smc::rng *pRng) {
        double *to = pFrom.GetValuePointer();

        double x = 0.5 * (*to) + 25.0 * (*to) / (1.0 + (*to) * (*to)) + 8.0 * cos(1.2 * lTime) + pRng->Normal(0.0, std_x);
        
        *to = x;

        pFrom.AddToLogWeight(logLikelihood(lTime, *to));
    }
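Written out, the move above is the state transition of the classic univariate nonlinear growth model commonly used to benchmark particle filters (assuming std_x is the standard deviation of the process noise):

x_t = 0.5\,x_{t-1} + \frac{25\,x_{t-1}}{1 + x_{t-1}^2} + 8\cos(1.2\,t) + v_t, \qquad v_t \sim \mathcal{N}(0, \sigma_x^2).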
Example #8
    /// \param value The value of the particle being moved
    /// \param logweight The log weight of the particle being moved
    /// \param param Additional algorithm parameters
    void fInitialise(cv_state & value, double & logweight, smc::nullParams & param)
    {
        value.x_pos = R::rnorm(0.0,sqrt(var_s0));
        value.y_pos = R::rnorm(0.0,sqrt(var_s0));
        value.x_vel = R::rnorm(0.0,sqrt(var_u0));
        value.y_vel = R::rnorm(0.0,sqrt(var_u0));

        logweight = logLikelihood(0,value);
    }
 /// \param pRng A pointer to the random number generator which is to be used
 smc::particle<State> fInitialise(smc::rng *pRng)
 {
   State x;
   x.lineField = vector<int>(2);
   x.lineField[0] = 0;
   x.lineField[1] = 0;
   x.alpha = alpha0;
   x.beta  = beta0;
   x.gamma = gamma0;
   return smc::particle<State>(x,logLikelihood(0,x));
 }
Example #10
void bob::learn::em::GMMMachine::accStatistics(const blitz::Array<double, 1>& x, bob::learn::em::GMMStats& stats) const {
  // check GMMStats size
  bob::core::array::assertSameDimensionLength(stats.sumPx.extent(0), m_n_gaussians);
  bob::core::array::assertSameDimensionLength(stats.sumPx.extent(1), m_n_inputs);

  // Calculate Gaussian and GMM likelihoods
  // - m_cache_log_weighted_gaussian_likelihoods(i) = log(weight_i*p(x|gaussian_i))
  // - log_likelihood = log(sum_i(weight_i*p(x|gaussian_i)))
  double log_likelihood = logLikelihood(x, m_cache_log_weighted_gaussian_likelihoods);

  accStatisticsInternal(x, stats, log_likelihood);
}
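The two cached quantities described in the comments combine via a log-sum-exp; for a mixture with weights w_i and component densities p(x | i) this reads:

\log p(x) = \log \sum_i w_i\, p(x \mid i) = \operatorname{logsumexp}_i\bigl(\log w_i + \log p(x \mid i)\bigr).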
void NestedSampler::removeLivePointsFromSample(const vector<int> &indicesOfLivePointsToRemove, 
                                               vector<int> &clusterIndices, vector<int> &clusterSizes)
{
    int NlivePointsToRemove = indicesOfLivePointsToRemove.size();
    int NlivePointsAtCurrentIteration = clusterIndices.size();

    for (int m = 0; m < NlivePointsToRemove; ++m)
    {
        // Swap the last element of the set of live points with the chosen one 
        // and erase the last element. This is done for all the arrays that store information
        // about live points.
 
        ArrayXd nestedSamplePerLivePointCopy(Ndimensions);
        nestedSamplePerLivePointCopy = nestedSample.col(NlivePointsAtCurrentIteration-1);
        nestedSample.col(NlivePointsAtCurrentIteration-1) = nestedSample.col(indicesOfLivePointsToRemove[m]);
        nestedSample.col(indicesOfLivePointsToRemove[m]) = nestedSamplePerLivePointCopy;
        nestedSample.conservativeResize(Ndimensions, NlivePointsAtCurrentIteration-1);       
                
        double logLikelihoodCopy = logLikelihood(NlivePointsAtCurrentIteration-1);
        logLikelihood(NlivePointsAtCurrentIteration-1) = logLikelihood(indicesOfLivePointsToRemove[m]);
        logLikelihood(indicesOfLivePointsToRemove[m]) = logLikelihoodCopy;
        logLikelihood.conservativeResize(NlivePointsAtCurrentIteration-1);
        

        // In the case of clusterIndices also subtract selected live point from
        // corresponding clusterSizes in order to update the size of the cluster 
        // the live point belongs to.
                
        int clusterIndexCopy = clusterIndices[NlivePointsAtCurrentIteration-1];
        clusterIndices[NlivePointsAtCurrentIteration-1] = clusterIndices[indicesOfLivePointsToRemove[m]];
        --clusterSizes[clusterIndices[indicesOfLivePointsToRemove[m]]];
        clusterIndices[indicesOfLivePointsToRemove[m]] = clusterIndexCopy;
        clusterIndices.pop_back();

                
        // Reduce the current number of live points by one.
                
        --NlivePointsAtCurrentIteration;
    }
}
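The removal pattern above (swap the doomed element with the last one, then shrink) avoids shifting every subsequent element. A minimal, self-contained sketch of the same idea on a std::vector, independent of the NestedSampler members:

#include <cstddef>
#include <utility>
#include <vector>

// Remove the element at position `index` in O(1) by swapping it with the last
// element and popping the back; the order of the remaining elements changes.
template <typename T>
void swapAndRemove(std::vector<T> &v, std::size_t index)
{
    std::swap(v[index], v.back());
    v.pop_back();
}

When several indices are removed in sequence, as in removeLivePointsFromSample above, the swap target has to track the shrinking size, which is why the function decrements NlivePointsAtCurrentIteration inside the loop.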
Example #12
fracfloat_t RealDist::approximateDifferentialEntropyFromSamples(Array<fracfloat_t> samples) const {
  assert(log2ff(FRACFLOAT_EPSILON) > FRACFLOAT_NEGATIVE_INFINITY);
  fracfloat_t sum = 0;
  for(unsigned i = 0; i < samples.length; i++){
    fracfloat_t ll = logLikelihood(samples[i]);
    //assert(epsilonCompare(ll, log2ff(likelihood(samples[i])))); //This won't be true if likelihood is 0.
    if(FRACFLOAT_NEGATIVE_INFINITY < ll ) sum += ll;
    //Otherwise, the sample is too unlikely (or NaN), consider it a 0 log 0 situation.
  }

  sum /= samples.length;
  return -sum;
}
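The loop above is a Monte Carlo estimate of the differential entropy, with samples whose log-likelihood is -infinity skipped (the 0·log 0 convention mentioned in the comment):

\hat{H} \approx -\frac{1}{N} \sum_{i=1}^{N} \log p(x_i).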
void ObservationModel::integratePoseMeasurement(Particles& particles, double poseRoll, double posePitch, const tf::StampedTransform& footprintToTorso){
  double poseHeight = footprintToTorso.getOrigin().getZ();
  ROS_DEBUG("Pose measurement z=%f R=%f P=%f", poseHeight, poseRoll, posePitch);
  // TODO cluster xy of particles => speedup
#pragma omp parallel for
  for (unsigned i=0; i < particles.size(); ++i){
    // integrate IMU meas.:
    double roll, pitch, yaw;
    particles[i].pose.getBasis().getRPY(roll, pitch, yaw);
    particles[i].weight += m_weightRoll * logLikelihood(poseRoll - roll, m_sigmaRoll);
    particles[i].weight += m_weightPitch * logLikelihood(posePitch - pitch, m_sigmaPitch);

    // integrate height measurement (z)
    double heightError = 0;
//    if (getHeightError(particles[i],footprintToTorso, heightError))
//      particles[i].weight += m_weightZ * logLikelihood(heightError, m_sigmaZ);
    particles[i].weight += m_weightZ * logLikelihood(heightError, m_sigmaZ);


  }

}
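logLikelihood(error, sigma) above is used as the log-density of a zero-mean Gaussian measurement error. A hedged sketch of what such a helper could look like; the name and implementation below are hypothetical, not the helper actually used by ObservationModel:

#include <cmath>

// Hypothetical sketch: log-density of a zero-mean normal distribution with
// standard deviation sigma, evaluated at x. The real helper in the surrounding
// codebase is not shown here.
static double logLikelihoodGaussian(double x, double sigma)
{
    const double twoPi = 6.283185307179586;
    return -0.5 * std::log(twoPi * sigma * sigma) - (x * x) / (2.0 * sigma * sigma);
}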
Example #14
//log likelihood function for differential rate dN from WIMPs (background not included)
//This is the one that gets passed to MultiNest
void LogLikedN(double *Cube, int &ndim, int &npars, double &lnew, long &pointer)    
{   

    //get pointer in from MultiNest 
    parameterList *pL = (parameterList *) pointer;
    
    //WIMP pars for this point in the parameter space
    WIMPpars Wcube;
    scaleParams(Cube, pL->p, &Wcube);

    if(pL->binlessL==1)
    {
        lnew = logLikelihoodBinless( &Wcube, pL->detectors, pL->ndet, 1);
    }
    else
    {
        lnew = logLikelihood( &Wcube, pL->detectors, pL->ndet, 1);
    }
    //Cube[(int)pL->p.vLa[0][3]] = Wcube.vLa[0];
    
}
Example #15
fracfloat_t RealDist::logLikelihood(fracfloat_t trueValue, fracfloat_t predictedValue) const {
  return logLikelihood(predictedValue - trueValue);
}
void NestedSampler::run(LivePointsReducer &livePointsReducer, const int NinitialIterationsWithoutClustering, 
                        const int NiterationsWithSameClustering, const int maxNdrawAttempts, 
                        const double maxRatioOfRemainderToCurrentEvidence, string pathPrefix)
{
    int startTime = time(0);
    double logMeanLiveEvidence;
    terminationFactor = maxRatioOfRemainderToCurrentEvidence;
    outputPathPrefix = pathPrefix;

    if (printOnTheScreen)
    {
        cerr << "------------------------------------------------" << endl;
        cerr << " Bayesian Inference problem has " << Ndimensions << " dimensions." << endl;
        cerr << "------------------------------------------------" << endl;
        cerr << endl;
    }


    // Save configuring parameters to an output ASCII file

    string fileName = "configuringParameters.txt";
    string fullPath = outputPathPrefix + fileName;
    File::openOutputFile(outputFile, fullPath);
   
   
    outputFile << "# List of configuring parameters used for the NSMC." << endl;
    outputFile << "# Row #1: Ndimensions" << endl;
    outputFile << "# Row #2: Initial(Maximum) NlivePoints" << endl;
    outputFile << "# Row #3: Minimum NlivePoints" << endl;
    outputFile << "# Row #4: NinitialIterationsWithoutClustering" << endl;
    outputFile << "# Row #5: NiterationsWithSameClustering" << endl;
    outputFile << "# Row #6: maxNdrawAttempts" << endl;
    outputFile << "# Row #7: terminationFactor" << endl;
    outputFile << "# Row #8: Niterations" << endl;
    outputFile << "# Row #9: Optimal Niterations" << endl;
    outputFile << "# Row #10: Final Nclusters" << endl;
    outputFile << "# Row #11: Final NlivePoints" << endl;
    outputFile << "# Row #12: Computational Time (seconds)" << endl;
    outputFile << Ndimensions << endl;
    outputFile << initialNlivePoints << endl;
    outputFile << minNlivePoints << endl;
    outputFile << NinitialIterationsWithoutClustering << endl;
    outputFile << NiterationsWithSameClustering << endl;
    outputFile << maxNdrawAttempts << endl;
    outputFile << terminationFactor << endl;


    // Set up the random number generator. It generates integer random numbers
    // between 0 and NlivePoints-1, inclusive.

    uniform_int_distribution<int> discreteUniform(0, NlivePoints-1);


    // Draw the initial sample from the prior PDF. Different coordinates of a point
    // can have different priors, so these have to be sampled individually.
    
    if (printOnTheScreen)
    {
        cerr << "------------------------------------------------" << endl;
        cerr << " Doing initial sampling of parameter space..." << endl;
        cerr << "------------------------------------------------" << endl;
        cerr << endl;
    }
        
    nestedSample.resize(Ndimensions, NlivePoints);
    int beginIndex = 0;
    int NdimensionsOfCurrentPrior;
    ArrayXXd priorSample;

    for (int i = 0; i < ptrPriors.size(); i++)
    {
        // Some priors cover one particular coordinate, others may cover two or more coordinates.
        // Find out how many dimensions the current prior covers.

        NdimensionsOfCurrentPrior = ptrPriors[i]->getNdimensions();
        

        // Draw the subset of coordinates randomly from the current prior
        
        priorSample.resize(NdimensionsOfCurrentPrior, NlivePoints);
        ptrPriors[i]->draw(priorSample);


        // Insert this random subset of coordinates into the total sample of coordinates of points

        nestedSample.block(beginIndex, 0, NdimensionsOfCurrentPrior, NlivePoints) = priorSample;      


        // Move index to the beginning of the coordinate set of the next prior

        beginIndex += NdimensionsOfCurrentPrior;
    }


    // Compute the log(Likelihood) for each of our points in the live sample

    logLikelihood.resize(NlivePoints);
    
    for (int i = 0; i < NlivePoints; ++i)
    {
        logLikelihood(i) = likelihood.logValue(nestedSample.col(i));
    }


    // Initialize the prior mass interval and cumulate it

    double logWidthInPriorMass = log(1.0 - exp(-1.0/NlivePoints));                                             // X_0 - X_1    First width in prior mass
    logCumulatedPriorMass = Functions::logExpSum(logCumulatedPriorMass, logWidthInPriorMass);               // 1 - X_1
    logRemainingPriorMass = Functions::logExpDifference(logRemainingPriorMass, logWidthInPriorMass);        // X_1


    // Initialize first part of width in prior mass for trapezoidal rule
    // X_0 = (2 - X_1), right-side boundary condition for trapezoidal rule

    double logRemainingPriorMassRightBound = Functions::logExpDifference(log(2), logRemainingPriorMass);    
    double logWidthInPriorMassRight = Functions::logExpDifference(logRemainingPriorMassRightBound,logRemainingPriorMass);


    // Find maximum log(Likelihood) value in the initial sample of live points. 
    // This information can be useful when reducing the number of live points adopted within the nesting process.

    logMaxLikelihoodOfLivePoints = logLikelihood.maxCoeff();


    // The nested sampling will involve finding clusters in the sample.
    // This will require the containers clusterIndices and clusterSizes.

    unsigned int Nclusters = 0;
    vector<int> clusterIndices(NlivePoints);           // clusterIndices must have the same number of elements as the number of live points
    vector<int> clusterSizes;                       // The number of live points counted in each cluster is updated every time a live point
                                                    // is removed from the sample.


    // Start the nested sampling loop. Each iteration, we'll replace the point with the worst likelihood.
    // New points are drawn from the prior, but with the constraint that they should have a likelihood
    // that is better than the currently worst one.
    
    if (printOnTheScreen)
    {
        cerr << "-------------------------------" << endl;
        cerr << " Starting nested sampling...   " << endl;
        cerr << "-------------------------------" << endl;
        cerr << endl;
    }
        
    bool nestedSamplingShouldContinue = true;
    bool livePointsShouldBeReduced = (initialNlivePoints > minNlivePoints);       // Update live points only if required
    
    Niterations = 0;

    do 
    {
        // Resize the arrays to make room for an additional point.
        // Do so without destroying the original contents.

        posteriorSample.conservativeResize(Ndimensions, Niterations + 1);  
        logLikelihoodOfPosteriorSample.conservativeResize(Niterations + 1);
        logWeightOfPosteriorSample.conservativeResize(Niterations + 1);
        

        // Find the point with the worst likelihood. This likelihood value will set a constraint
        // when drawing new points later on.
        
        int indexOfLivePointWithWorstLikelihood;
        worstLiveLogLikelihood = logLikelihood.minCoeff(&indexOfLivePointWithWorstLikelihood);

        
        // Although we will replace the point with the worst likelihood in the live sample, we will save
        // it in our collection of posterior sample. Also save its likelihood value. The weight is 
        // computed and collected at the end of each iteration.

        posteriorSample.col(Niterations) = nestedSample.col(indexOfLivePointWithWorstLikelihood); 
        logLikelihoodOfPosteriorSample(Niterations) = worstLiveLogLikelihood; 


        // Compute the (logarithm of) the mean likelihood of the set of live points.
        // Note that we are not computing mean(log(likelihood)) but log(mean(likelihood)).
        // Since we are only storing the log(likelihood) values, this results in a peculiar
        // way of computing the mean. This will be used for computing the mean live evidence
        // at the end of the iteration.
        
        logMeanLikelihoodOfLivePoints = logLikelihood(0);

        for (int m = 1; m < NlivePoints; m++)
        {
            logMeanLikelihoodOfLivePoints = Functions::logExpSum(logMeanLikelihoodOfLivePoints, logLikelihood(m));
        }

        logMeanLikelihoodOfLivePoints -= log(NlivePoints);
                

        // Find clusters in our live sample of points. Don't do this every iteration but only
        // every x iterations, where x is given by 'NiterationsWithSameClustering'.
        
        if ((Niterations % NiterationsWithSameClustering) == 0)
        {            
            // Don't do clustering the first N iterations, where N is user-specified. That is, 
            // the first N iterations we assume that there is only 1 cluster containing all the points.
            // This is often useful because initially the points may be sampled from a uniform prior,
            // and we therefore don't expect any clustering _before_ the algorithm is able to tune in on 
            // the island(s) of high likelihood. Clusters found in the first N initial iterations are
            // therefore likely purely noise.
        
            if (Niterations < NinitialIterationsWithoutClustering)
            {
                // There is only 1 cluster, containing all objects. All points have the same cluster
                // index, namely 0.
                       
                Nclusters = 1;
                clusterSizes.resize(1);
                clusterSizes[0] = NlivePoints;
                fill(clusterIndices.begin(), clusterIndices.end(), 0);
            }
            else         
            {
                // After the first N initial iterations, we do a proper clustering.
                
                Nclusters = clusterer.cluster(nestedSample, clusterIndices, clusterSizes);
            }
        }


        // Draw a new point, which should replace the point with the worst likelihood.
        // This new point should be drawn from the prior, but with a likelihood greater 
        // than the current worst likelihood. The drawing algorithm may need a starting point,
        // for which we will take a randomly chosen point of the live sample (excluding the
        // worst point).

        int indexOfRandomlyChosenLivePoint = 0;
        
        if (NlivePoints > 1)
        {
            // Select randomly an index of a sample point, but not the one of the worst point

            do 
            {
                // 0 <= indexOfRandomlyChosenLivePoint < NlivePoints

                indexOfRandomlyChosenLivePoint = discreteUniform(engine);
            } 
            while (indexOfRandomlyChosenLivePoint == indexOfLivePointWithWorstLikelihood);
        }


        // drawnPoint will be a starting point as input, and will contain the newly drawn point as output

        ArrayXd drawnPoint = nestedSample.col(indexOfRandomlyChosenLivePoint);
        double logLikelihoodOfDrawnPoint = 0.0;
        bool newPointIsFound = drawWithConstraint(nestedSample, Nclusters, clusterIndices, clusterSizes, 
                                                  drawnPoint, logLikelihoodOfDrawnPoint, maxNdrawAttempts); 


        // If the adopted sampler produces an error (e.g. in the case of the ellipsoidal sampler a failure
        // in the ellipsoid matrix decomposition), then we can stop right here.
        
        nestedSamplingShouldContinue = verifySamplerStatus();
        if (!nestedSamplingShouldContinue) break;


        // If we didn't find a point with a better likelihood, then we can stop right here.
        
        if (!newPointIsFound)
        {
            nestedSamplingShouldContinue = false;
            cerr << "Can't find point with a better Likelihood." << endl; 
            cerr << "Stopping the nested sampling loop prematurely." << endl;
            break;
        }


        // Replace the point having the worst likelihood with our newly drawn one.

        nestedSample.col(indexOfLivePointWithWorstLikelihood) = drawnPoint;
        logLikelihood(indexOfLivePointWithWorstLikelihood) = logLikelihoodOfDrawnPoint;
       
        
        // If we get to this point, this is not the last possible iteration, so
        // update all the information needed for the next iteration.
        // Check whether the number of live points has reached the minimum allowed,
        // and update it for the next iteration if not.

        if (livePointsShouldBeReduced)
        {
            // Update the number of live points for the current iteration based on the previous number.
            // If the number of live points reaches the minimum allowed 
            // then do not update the number anymore.

            updatedNlivePoints = livePointsReducer.updateNlivePoints();
            
            if (updatedNlivePoints > NlivePoints)
            {
                // Terminate program if new number of live points is greater than previous one
                    
                cerr << "Something went wrong in the reduction of the live points." << endl;
                cerr << "The new number of live points is greater than the previous one." << endl;
                cerr << "Quitting program. " << endl;
                break;
            }

                
            // If the lower bound on the number of live points has not been reached yet,
            // the reduction should be repeated at the next iteration.
            // Otherwise the minimum number allowed has just been reached; in that case
            // stop the reduction process from the next iteration onwards.
                
            livePointsShouldBeReduced = (updatedNlivePoints > minNlivePoints);

            if (updatedNlivePoints != NlivePoints)
            {
                // Resize all Eigen arrays and vectors of dimension NlivePoints according to
                // the new number of live points. If the previous and new numbers
                // of live points coincide, no resizing is done.
                    
                vector<int> indicesOfLivePointsToRemove = livePointsReducer.findIndicesOfLivePointsToRemove(engine);

                    
                // At least one live point has to be removed, hence update the sample

                removeLivePointsFromSample(indicesOfLivePointsToRemove, clusterIndices, clusterSizes);
                        
                        
                // Since everything is fine update discreteUniform with the corresponding new upper bound

                uniform_int_distribution<int> discreteUniform2(0, updatedNlivePoints-1);
                discreteUniform = discreteUniform2;
            }
        }


        // Store the new number of live points in the vector containing this information.
        // This is done even if the new number is the same as the previous one.

        NlivePointsPerIteration.push_back(NlivePoints);

            
        // Compute the mean live evidence given the previous set of live points (see Keeton 2011, MNRAS) 

        logMeanLiveEvidence = logMeanLikelihoodOfLivePoints + Niterations * (log(NlivePoints) - log(NlivePoints + 1));


        // Compute the ratio of the evidence of the live sample to the current Skilling's evidence.
        // Only when we gathered enough evidence, this ratio will be sufficiently small so that we can stop the iterations.

        ratioOfRemainderToCurrentEvidence = exp(logMeanLiveEvidence - logEvidence);


        // Re-evaluate the stopping criterion, using the condition suggested by Keeton (2011)

        nestedSamplingShouldContinue = (ratioOfRemainderToCurrentEvidence > maxRatioOfRemainderToCurrentEvidence);


        // Shrink prior mass interval according to proper number of live points 
        // (see documentation by Enrico Corsaro October 2013). When reducing the number of live points 
        // the equation is a generalized version of that used by Skilling 2004. The equation
        // reduces to the standard case when the new number of live points is the same
        // as the previous one.

        // ---- Use the line below for simple rectangular rule ----
        // double logWeight = logWidthInPriorMass;
        // --------------------------------------------------------
        
        double logStretchingFactor = Niterations*((1.0/NlivePoints) - (1.0/updatedNlivePoints)); 
        logWidthInPriorMass = logRemainingPriorMass + Functions::logExpDifference(0.0, logStretchingFactor - 1.0/updatedNlivePoints);  // X_i - X_(i+1)

        
        // Compute the logWeight according to the trapezoidal rule 0.5*(X_(i-1) - X_(i+1)) 
        // and new contribution of evidence to be cumulated to the total evidence.
        // This is done in logarithmic scale by summing the right (X_(i-1) - X_i) and left part (X_i - X_(i+1)) 
        // of the total width in prior mass required for the trapezoidal rule. We do this computation at the end 
        // of the nested iteration because we need to know the new remaining prior mass of the next iteration.
            
        double logWidthInPriorMassLeft = logWidthInPriorMass; 

        
        // ---- Use the line below for trapezoidal rule ----

        double logWeight = log(0.5) + Functions::logExpSum(logWidthInPriorMassLeft, logWidthInPriorMassRight);
        double logEvidenceContributionNew = logWeight + worstLiveLogLikelihood;


        // Save log(Weight) of the current iteration

        logWeightOfPosteriorSample(Niterations) = logWeight;


        // Update the right part of the width in prior mass interval by replacing it with the left part

        logWidthInPriorMassRight = logWidthInPriorMass;


        // Update the evidence and the information Gain
        
        double logEvidenceNew = Functions::logExpSum(logEvidence, logEvidenceContributionNew);
        informationGain = exp(logEvidenceContributionNew - logEvidenceNew) * worstLiveLogLikelihood 
                        + exp(logEvidence - logEvidenceNew) * (informationGain + logEvidence) 
                        - logEvidenceNew;
        logEvidence = logEvidenceNew;


        // Print current information on the screen, if required

        if (printOnTheScreen)
        {
            if ((Niterations % 50) == 0)
            {
                cerr << "Nit: " << Niterations 
                     << "   Ncl: " << Nclusters 
                     << "   Nlive: " << NlivePoints
                     << "   CPM: " << exp(logCumulatedPriorMass)
                     << "   Ratio: " << ratioOfRemainderToCurrentEvidence
                     << "   log(E): " << logEvidence 
                     << "   IG: " << informationGain
                     << endl; 
            }
        }


        // Update total width in prior mass and remaining width in prior mass from beginning to current iteration
        // and use this information for the next iteration (if any)

        logCumulatedPriorMass = Functions::logExpSum(logCumulatedPriorMass, logWidthInPriorMass);
        logRemainingPriorMass = logStretchingFactor + logRemainingPriorMass - 1.0/updatedNlivePoints;


        // Update new number of live points in NestedSampler class 
            
        NlivePoints = updatedNlivePoints;


        // Increase nested loop counter
        
        Niterations++;
    }
    while (nestedSamplingShouldContinue);


    // Add the remaining live sample of points to our collection of posterior points 
    // (i.e parameter coordinates, likelihood values and weights)

    unsigned int oldNpointsInPosterior = posteriorSample.cols();

    posteriorSample.conservativeResize(Ndimensions, oldNpointsInPosterior + NlivePoints);          // First make enough room
    posteriorSample.block(0, oldNpointsInPosterior, Ndimensions, NlivePoints) = nestedSample;      // Then copy the live sample to the posterior array
    logWeightOfPosteriorSample.conservativeResize(oldNpointsInPosterior + NlivePoints);
    logWeightOfPosteriorSample.segment(oldNpointsInPosterior, NlivePoints).fill(logRemainingPriorMass - log(NlivePoints));  // TODO: check whether this is the best condition to impose
    logLikelihoodOfPosteriorSample.conservativeResize(oldNpointsInPosterior + NlivePoints);
    logLikelihoodOfPosteriorSample.segment(oldNpointsInPosterior, NlivePoints) = logLikelihood; 


    // Compute Skilling's error on the log(Evidence)
    
    logEvidenceError = sqrt(fabs(informationGain)/NlivePoints);


    // Add Mean Live Evidence of the remaining live sample of points to the total log(Evidence) collected

    logEvidence = Functions::logExpSum(logMeanLiveEvidence, logEvidence);
    
    if (printOnTheScreen)
    {
        cerr << "------------------------------------------------" << endl;
        cerr << " Final log(E): " << logEvidence << " +/- " << logEvidenceError << endl;
        cerr << "------------------------------------------------" << endl;
    }

    // Print total computational time

    printComputationalTime(startTime);
    
    
    // Append information to existing output file and close stream afterwards
    
    outputFile << Niterations << endl;
    outputFile << static_cast<int>((NlivePoints*informationGain) + (NlivePoints*sqrt(Ndimensions*1.0))) << endl;
    outputFile << Nclusters << endl;
    outputFile << NlivePoints << endl;
    outputFile << computationalTime << endl;
}
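The bookkeeping inside the loop above boils down to a few formulas, restated here from the code and its comments (N is the current number of live points, i the iteration counter, X_i the remaining prior mass, L_worst the likelihood of the worst live point, and H the information gain); the code evaluates all of them in log space via Functions::logExpSum and logExpDifference:

w_i = \tfrac{1}{2}\bigl(X_{i-1} - X_{i+1}\bigr) \quad \text{(trapezoidal rule in prior mass)}
Z \leftarrow Z + w_i\, L_{\text{worst}}
\log E_{\text{live}} = \log \bar{L}_{\text{live}} + i\,\bigl[\log N - \log(N + 1)\bigr] \quad \text{(mean live evidence, Keeton 2011)}
\sigma_{\log Z} = \sqrt{H / N} \quad \text{(Skilling's error on the log-evidence)}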
Example #17
fracfloat_t RealDist::surprisal(fracfloat_t value) const {
  return - logLikelihood(value);
}
Example #18
 T logLikelihood(const Matrix<T,Dynamic,Dynamic>& x, uint32_t i) const
   { return logLikelihood(x.col(i)); }
Example #19
/**
 * \brief Estimate the parameters for linear regression.
 *
 * @param verbose Turns verbose printing of various matrices on if
 * non-zero.
 * @param model The number of the genetic model (e.g. additive,
 * recessive, ...) that is to be applied by the apply_model() function.
 * @param interaction
 * @param ngpreds Number of genomic predictors (1 for dosages, 2 for
 * probabilities).
 * @param invvarmatrixin
 * @param robust If non-zero calculate robust standard errors.
 * @param nullmodel If non-zero calculate the null model (excluding
 * SNP information).
 */
void linear_reg::estimate(const int verbose,
                          const int model,
                          const int interaction,
                          const int ngpreds,
                          masked_matrix& invvarmatrixin,
                          const int robust,
                          const int nullmodel) {
    // suda interaction parameter
    // model should come here
    //regdata rdata = rdatain.get_unmasked_data();

    if (verbose)
    {
        cout << reg_data.is_interaction_excluded
                << " <-rdata.is_interaction_excluded\n";
        // std::cout << "invvarmatrix:\n";
        // invvarmatrixin.masked_data->print();
        std::cout << "rdata.X:\n";
        reg_data.X.print();
    }

    mematrix<double> X = apply_model(reg_data.X, model, interaction, ngpreds,
            reg_data.is_interaction_excluded, false, nullmodel);
    if (verbose)
    {
        std::cout << "X:\n";
        X.print();
        std::cout << "Y:\n";
        reg_data.Y.print();
    }

    int length_beta = X.ncol;
    beta.reinit(length_beta, 1);
    sebeta.reinit(length_beta, 1);
    //Han Chen
    if (length_beta > 1)
    {
        if (model == 0 && interaction != 0 && ngpreds == 2 && length_beta > 2)
        {
            covariance.reinit(length_beta - 2, 1);
        }
        else
        {
            covariance.reinit(length_beta - 1, 1);
        }
    }

    double sigma2_internal;

    LDLT <MatrixXd> Ch;
    if (invvarmatrixin.length_of_mask != 0)
    {
        //retrieve masked data W
        invvarmatrixin.update_mask(reg_data.masked_data);
        mmscore_regression(X, invvarmatrixin, Ch);
        double N = X.nrow;
        //sigma2_internal = sigma2 / (N - static_cast<double>(length_beta));
        // Ugly fix to the fact that if we do mmscore, sigma2 is already
        //  in the matrix...
        //      YSA, 2009.07.20
        sigma2_internal = 1.0;
        sigma2 /= N;
    }
    else  // NO mm-score regression : normal least square regression
    {
        LeastSquaredRegression(X, Ch);
        double N = static_cast<double>(X.nrow);
        double P = static_cast<double>(length_beta);
        sigma2_internal = sigma2 / (N - P);
        sigma2 /= N;
    }
    /*
     loglik = 0.;
     double ss=0;
     for (int i=0;i<rdata.nids;i++) {
     double resid = rdata.Y[i] - beta.get(0,0); // intercept
     for (int j=1;j<beta.nrow;j++) resid -= beta.get(j,0)*X.get(i,j);
     // residuals[i] = resid;
     ss += resid*resid;
     }
     sigma2 = ss/N;
     */
    //cout << "estimate " << rdata.nids << "\n";
    //(rdata.X).print();
    //for (int i=0;i<rdata.nids;i++) cout << rdata.masked_data[i] << " ";
    //cout << endl;
    logLikelihood(X);

    MatrixXd tXX_inv = Ch.solve(MatrixXd(length_beta, length_beta).
                                Identity(length_beta, length_beta));
    mematrix<double> robust_sigma2(X.ncol, X.ncol);

    int offset = X.ncol - 1;
    // if additive and interaction and 2 predictors and more than 2 betas
    if (model == 0 && interaction != 0 && ngpreds == 2 && length_beta > 2) {
        offset = X.ncol - 2;
    }

    if (robust)
    {
        RobustSEandCovariance(X, robust_sigma2, tXX_inv, offset);
    }
    else
    {
        PlainSEandCovariance(sigma2_internal, tXX_inv, offset);
    }
}
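The logLikelihood(X) call above presumably evaluates the Gaussian log-likelihood of the fitted linear model; as a point of reference (an assumption about what that method computes, not taken from this code), the maximized log-likelihood under normal errors is:

\log L = -\frac{N}{2}\Bigl(\log(2\pi\hat\sigma^2) + 1\Bigr), \qquad \hat\sigma^2 = \frac{1}{N}\sum_{i=1}^{N}\bigl(y_i - x_i^\top \hat\beta\bigr)^2.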