Example #1
void SIR_scheme::copy_resamples (ColMatrix& P, const Importance_resampler::Resamples_t& presamples)
/*
 * Update P by selectively copying presamples
 * Uses an in-place copying algorithm:
 *  First copy the live samples (those resampled) to the end of P
 *  Then replicate each live sample in-place
 */
{
							// reverse_copy_if live
	std::size_t si = P.size2(), livei = si;
	Importance_resampler::Resamples_t::const_reverse_iterator pri, pri_end = presamples.rend();
	for (pri = presamples.rbegin(); pri != pri_end; ++pri) {
		--si;
		if (*pri > 0) {
			--livei;
			noalias(FM::column(P,livei)) = FM::column(P,si);
		}
	}
	assert (si == 0);
							// Replicate live samples
	si = 0;
	Importance_resampler::Resamples_t::const_iterator pi, pi_end = presamples.end();
	for (pi = presamples.begin(); pi != pi_end; ++pi) {
		std::size_t res = *pi;
		if (res > 0) {
			do  {
				noalias(FM::column(P,si)) = FM::column(P,livei);
				++si; --res;
			} while (res > 0);
			++livei;
		}
	}
	assert (si == P.size2()); assert (livei == P.size2());
}
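The same two-pass scheme can be shown without the uBLAS ColMatrix machinery. Below is a minimal, self-contained sketch that applies the in-place resample copy to a flat std::vector of scalar samples; the function name and storage are illustrative and not part of Bayes++.

#include <cassert>
#include <cstddef>
#include <vector>

// Stand-alone sketch of the in-place resample copy above.
// Precondition: counts.size() == samples.size() and the counts sum to samples.size().
void resample_in_place(std::vector<double>& samples, const std::vector<std::size_t>& counts)
{
	std::size_t n = samples.size();
	std::size_t livei = n;
	// Pass 1: reverse-copy the live samples (count > 0) to the tail of the vector
	for (std::size_t k = n; k-- > 0; ) {
		if (counts[k] > 0) {
			--livei;
			samples[livei] = samples[k];
		}
	}
	// Pass 2: replicate each live sample forward according to its count
	std::size_t si = 0;
	for (std::size_t k = 0; k < n; ++k) {
		for (std::size_t r = counts[k]; r > 0; --r)
			samples[si++] = samples[livei];
		if (counts[k] > 0)
			++livei;
	}
	assert (si == n); assert (livei == n);
}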
Example #2
void SIR_scheme::roughen_minmax (ColMatrix& P, Float K) const
/*
 * Roughening
 *  Uses the algorithm from Ref[1], based on the max-min range in each state of P
 *  K is the scaling factor for the roughening noise
 *  unique_samples is unchanged as roughening is used to postprocess observe resamples
 * Numerical collapse of P
 *  A P with very small or zero range results in minimal roughening
 * Exceptions:
 *  none
 *  unchanged: P
 */
{
	using namespace std;
						// Scale Sigma by constant and state dimensions
	Float SigmaScale = K * pow (Float(P.size2()), -1/Float(x_size));

						// Find min and max states in all P, precond P not empty
	Vec xmin(x_size); noalias(xmin) = column(P,0);
	Vec xmax(x_size); noalias(xmax) = xmin;
	ColMatrix::iterator2 pi = P.begin2();
	while (pi != P.end2())		// Loop includes 0 to simplify code
	{
		Vec::iterator mini = xmin.begin();
		Vec::iterator maxi = xmax.begin();

#ifdef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
		for (ColMatrix::iterator1 xpi = begin(pi, ublas::iterator2_tag()); xpi != end(pi, ublas::iterator2_tag()); ++xpi)
#else
		for (ColMatrix::iterator1 xpi = pi.begin(); xpi != pi.end(); ++xpi)
#endif
		{
			if (*xpi < *mini) *mini = Float(*xpi);	// ISSUE mixed type proxy assignment
			if (*xpi > *maxi) *maxi = Float(*xpi);
			++mini; ++maxi;
		}
		++pi;
	}
   						// Roughening st.dev max-min
	Vec rootq(x_size);
	rootq = xmax;
	noalias(rootq) -= xmin;
	rootq *= SigmaScale;
   						// Apply roughening predict based on scaled variance
	DenseVec n(x_size);
	for (pi = P.begin2(); pi != P.end2(); ++pi) {
	random.normal (n);			// independent zero mean normal
									// multiply elements by std dev
		for (DenseVec::iterator ni = n.begin(); ni != n.end(); ++ni) {
			*ni *= rootq[ni.index()];
		}
		ColMatrix::Column Pi(P,pi.index2());
		noalias(n) += Pi;			// add to P
		Pi = n;
	}
}
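For reference, a minimal stand-alone sketch of the same max-min roughening rule, using the standard <random> facilities in place of the scheme's random member; the particle storage and names here are illustrative only.

#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

// Sketch of max-min roughening: each state dimension s of every particle gets
// zero-mean Gaussian jitter with standard deviation K * n^(-1/d) * (xmax[s] - xmin[s]).
void roughen_minmax_sketch(std::vector<std::vector<double> >& particles, double K, std::mt19937& rng)
{
	if (particles.empty()) return;
	const std::size_t n = particles.size();		// number of particles
	const std::size_t d = particles[0].size();	// state dimension
	const double sigma_scale = K * std::pow(double(n), -1.0 / double(d));

	// Per-dimension range of the particle cloud
	std::vector<double> xmin(particles[0]), xmax(particles[0]);
	for (std::size_t i = 1; i < n; ++i)
		for (std::size_t s = 0; s < d; ++s) {
			if (particles[i][s] < xmin[s]) xmin[s] = particles[i][s];
			if (particles[i][s] > xmax[s]) xmax[s] = particles[i][s];
		}

	// Add independent zero-mean normal noise scaled by the range
	std::normal_distribution<double> normal(0.0, 1.0);
	for (std::size_t i = 0; i < n; ++i)
		for (std::size_t s = 0; s < d; ++s)
			particles[i][s] += normal(rng) * sigma_scale * (xmax[s] - xmin[s]);
}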
Example #3
void SIR_kalman_scheme::roughen_correlated (ColMatrix& P, Float K)
/*
 * Roughening
 *  Uses roughening noise based on the covariance of P
 *  This is a more sophisticated algorithm than Ref[1] as it takes
 *  into account the correlation of P
 *  K is the scaling factor for the roughening noise
 * Numerical collapse of P
 *  Numerically, when the covariance of P is semi-definite (or close to it),
 *  X's UdU factorisation may be negative
 * Exceptions:
 *  Bayes_filter_exception due to collapse of P
 *   unchanged: P
 */
{
	using namespace std;
						// Scale variance by constant and state dimensions
	Float VarScale = sqr(K) * pow (Float(P.size2()), Float(-2.)/Float(x_size));

	update_statistics();	// Estimate sample mean and covariance

						// Decorrelate states
	Matrix UD(x_size,x_size);
	Float rcond = UdUfactor (UD, X);
	rclimit.check_PSD(rcond, "Roughening X not PSD");

						// Sampled predict model for roughening
	FM::identity (roughen_model.Fx);
						// Roughening predict based on scaled variance
	UdUseperate (roughen_model.G, roughen_model.q, UD);
	roughen_model.q *= VarScale;
	roughen_model.init_GqG();
						// Predict using roughening model
	predict (roughen_model);
}
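A self-contained sketch of the correlated variant is given below: the jitter is drawn from N(0, VarScale * Cov(P)), so it respects the correlation of the particle cloud. It substitutes a plain Cholesky factor for the library's UdU factorisation and sampled roughening predict, so it only illustrates the idea; names and storage are not Bayes++ API.

#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

// Sketch of correlated roughening: add noise L*z with z ~ N(0, I) and
// L*L^T = VarScale * Cov(P). Cov(P) must be positive definite here,
// mirroring the PSD check on the UdU factorisation above.
void roughen_correlated_sketch(std::vector<std::vector<double> >& particles, double K, std::mt19937& rng)
{
	const std::size_t n = particles.size();
	const std::size_t d = particles[0].size();
	const double var_scale = K * K * std::pow(double(n), -2.0 / double(d));

	// Sample mean
	std::vector<double> mean(d, 0.0);
	for (std::size_t i = 0; i < n; ++i)
		for (std::size_t s = 0; s < d; ++s)
			mean[s] += particles[i][s] / double(n);

	// Scaled sample covariance
	std::vector<std::vector<double> > cov(d, std::vector<double>(d, 0.0));
	for (std::size_t i = 0; i < n; ++i)
		for (std::size_t r = 0; r < d; ++r)
			for (std::size_t c = 0; c < d; ++c)
				cov[r][c] += var_scale * (particles[i][r] - mean[r]) * (particles[i][c] - mean[c]) / double(n);

	// Lower-triangular Cholesky factor of the scaled covariance
	std::vector<std::vector<double> > L(d, std::vector<double>(d, 0.0));
	for (std::size_t r = 0; r < d; ++r)
		for (std::size_t c = 0; c <= r; ++c) {
			double sum = cov[r][c];
			for (std::size_t k = 0; k < c; ++k)
				sum -= L[r][k] * L[c][k];
			L[r][c] = (r == c) ? std::sqrt(sum) : sum / L[c][c];
		}

	// Add correlated noise L*z to every particle
	std::normal_distribution<double> normal(0.0, 1.0);
	for (std::size_t i = 0; i < n; ++i) {
		std::vector<double> z(d);
		for (std::size_t s = 0; s < d; ++s)
			z[s] = normal(rng);
		for (std::size_t r = 0; r < d; ++r)
			for (std::size_t c = 0; c <= r; ++c)
				particles[i][r] += L[r][c] * z[c];
	}
}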
Example #4
void RescaledHmmLikelihood::computeD2Forward_() const
{
  //Init arrays:
  if (d2Likelihood_.size()==0){
    d2Likelihood_.resize(nbSites_);
    for (size_t i=0;i<nbSites_;i++)
      d2Likelihood_[i].resize(nbStates_);
  }
  if (d2Scales_.size()==0)
    d2Scales_.resize(nbSites_);
  
  double x;
  vector<double> tmp(nbStates_), dTmp(nbStates_), d2Tmp(nbStates_);
  vector<double> d2LScales(nbSites_);
  
  //Transition probabilities:
  const ColMatrix<double> trans(transitionMatrix_->getPij());

  //Initialisation:
  d2Scales_[0] = 0;
  const vector<double>* emissions = &(*emissionProbabilities_)(0);
  const vector<double>* dEmissions = &emissionProbabilities_->getDEmissionProbabilities(0);
  const vector<double>* d2Emissions = &emissionProbabilities_->getD2EmissionProbabilities(0);

  for (size_t j = 0; j < nbStates_; j++)
  {
    tmp[j] = (*emissions)[j] * transitionMatrix_->getEquilibriumFrequencies()[j];
    dTmp[j] = (*dEmissions)[j] * transitionMatrix_->getEquilibriumFrequencies()[j];
    d2Tmp[j] = (*d2Emissions)[j] * transitionMatrix_->getEquilibriumFrequencies()[j];

    d2Scales_[0] += d2Tmp[j];
  }

  d2LScales[0] = d2Scales_[0] / scales_[0] - pow(dScales_[0] / scales_[0], 2);
  
  for (size_t j = 0; j < nbStates_; j++)
    d2Likelihood_[0][j] = d2Tmp[j] / scales_[0] - (d2Scales_[0] * tmp[j] + 2 * dScales_[0] * dTmp[j]) / pow(scales_[0],2)
      +  2 * pow(dScales_[0],2) * tmp[j] / pow(scales_[0],3);
   
  //Recursion:

  size_t nextBrkPt = nbSites_; //next break point
  vector<size_t>::const_iterator bpIt = breakPoints_.begin();
  if (bpIt != breakPoints_.end()) nextBrkPt = *bpIt;
  
  for (size_t i = 1; i < nbSites_; i++)
  {
    d2Scales_[i] = 0;

    emissions = &(*emissionProbabilities_)(i);
    dEmissions = &emissionProbabilities_->getDEmissionProbabilities(i);
    d2Emissions = &emissionProbabilities_->getD2EmissionProbabilities(i);
    
    if (i < nextBrkPt)
    {
      size_t iip = (i - 1) * nbStates_;

      for (size_t j = 0; j < nbStates_; j++)
      {
        x = 0;
        for (size_t k = 0; k < nbStates_; k++)
          x += trans(k,j) * likelihood_[iip + k];

        tmp[j] = (*emissions)[j] * x;
        dTmp[j] = (*dEmissions)[j] * x + (*emissions)[j] * VectorTools::sum(trans.getCol(j) * dLikelihood_[i-1]);
        d2Tmp[j] = (*d2Emissions)[j] * x + 2 * (*dEmissions)[j] * VectorTools::sum(trans.getCol(j) * dLikelihood_[i-1])
          + (*emissions)[j] * VectorTools::sum(trans.getCol(j) * d2Likelihood_[i-1]);
          
        d2Scales_[i] += d2Tmp[j];
      }
    }
    else //Reset markov chain:
    {
      for (size_t j = 0; j < nbStates_; j++)
      {
        tmp[j] = (*emissions)[j] * transitionMatrix_->getEquilibriumFrequencies()[j];
        dTmp[j] = (*dEmissions)[j] * transitionMatrix_->getEquilibriumFrequencies()[j];
        d2Tmp[j] = (*d2Emissions)[j] * transitionMatrix_->getEquilibriumFrequencies()[j];
        
        d2Scales_[i] += d2Tmp[j];
      }
      
      bpIt++;
      if (bpIt != breakPoints_.end()) nextBrkPt = *bpIt;
      else nextBrkPt = nbSites_;
    }

    d2LScales[i] = d2Scales_[i] / scales_[i] - pow(dScales_[i] / scales_[i], 2);
  
    for (size_t j = 0; j < nbStates_; j++)
      d2Likelihood_[i][j] = d2Tmp[j] / scales_[i] - (d2Scales_[i] * tmp[j] + 2 * dScales_[i] * dTmp[j]) / pow(scales_[i],2)
        +  2 * pow(dScales_[i],2) * tmp[j] / pow(scales_[i],3);
  }
  
  greater<double> cmp;
  sort(d2LScales.begin(), d2LScales.end(), cmp);
  d2LogLik_ = 0;
  for (size_t i = 0; i < nbSites_; ++i)
  {
    d2LogLik_ += d2LScales[i];
  }
}
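The final accumulation uses the fact that the rescaled forward algorithm gives log L = sum_i log(scales_[i]), so its second derivative with respect to the parameter is sum_i (d2Scales_[i]/scales_[i] - (dScales_[i]/scales_[i])^2), which is exactly what d2LScales[i] holds. A minimal helper making that identity explicit (names are illustrative, not the Bio++ API):

#include <cmath>
#include <cstddef>
#include <vector>

// Second derivative of the log-likelihood from per-site scales and their
// first and second derivatives: d2 log(s_i) = s_i''/s_i - (s_i'/s_i)^2.
double d2_log_likelihood(const std::vector<double>& scales,
                         const std::vector<double>& dScales,
                         const std::vector<double>& d2Scales)
{
  double d2LogLik = 0;
  for (std::size_t i = 0; i < scales.size(); ++i)
    d2LogLik += d2Scales[i] / scales[i] - std::pow(dScales[i] / scales[i], 2);
  return d2LogLik;
}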
Example #5
Bayes_base::Float Unscented_scheme::observe (Correlated_additive_observe_model& h, const FM::Vec& z)
/* Observation fusion
 *  Pre : x,X
 *  Post: x,X is PSD
 */
{
    std::size_t z_size = z.size();
    ColMatrix zXX (z_size, 2*x_size+1);
    Vec zp(z_size);
    SymMatrix Xzz(z_size,z_size);
    Matrix Xxz(x_size,z_size);
    Matrix W(x_size,z_size);

    observe_size (z.size());	// Dynamic sizing

    // Create Unscented distribution
    kappa = observe_Kappa(x_size);
    Float x_kappa = Float(x_size) + kappa;
    unscented (XX, x, X, x_kappa);

    // Predict points of XX using supplied observation model
    {
        Vec zXXi(z_size), zXX0(z_size);
        zXX0 = h.h( column(XX,0) );
        column(zXX,0) = zXX0;
        for (std::size_t i = 1; i < XX.size2(); ++i) {
            zXXi = h.h( column(XX,i) );
            // Normalise relative to zXX0
            h.normalise (zXXi, zXX0);
            column(zXX,i) = zXXi;
        }
    }

    // Mean of predicted distribution: zp
    noalias(zp) = column(zXX,0) * kappa;
    for (std::size_t i = 1; i < zXX.size2(); ++i) {
        noalias(zp) += column(zXX,i) / Float(2); // ISSUE uBlas may not be able to promote integer 2
    }
    zp /= x_kappa;

    // Covariance of observation predict: Xzz
    // Subtract mean from each point in zXX
    for (std::size_t i = 0; i < XX_size; ++i) {
        column(zXX,i).minus_assign (zp);
    }
    // Center point, premult here by 2 for efficiency
    {
        ColMatrix::Column zXX0 = column(zXX,0);
        noalias(Xzz) = FM::outer_prod(zXX0, zXX0);
        Xzz *= 2*kappa;
    }
    // Remaining Unscented points
    for (std::size_t i = 1; i < zXX.size2(); ++i) {
        ColMatrix::Column zXXi = column(zXX,i);
        noalias(Xzz) += FM::outer_prod(zXXi, zXXi);
    }
    Xzz /= 2*x_kappa;

    // Correlation of state with observation: Xxz
    // Center point, premult here by 2 for efficiency
    {
        noalias(Xxz) = FM::outer_prod(column(XX,0) - x, column(zXX,0));
        Xxz *= 2*kappa;
    }
    // Remaining Unscented points
    for (std::size_t i = 1; i < zXX.size2(); ++i) {
        noalias(Xxz) += FM::outer_prod(column(XX,i) - x, column(zXX,i));
    }
    Xxz /= 2*x_kappa;

    // Innovation covariance
    S = Xzz;
    noalias(S) += h.Z;
    // Inverse innovation covariance
    Float rcond = UdUinversePD (SI, S);
    rclimit.check_PD(rcond, "S not PD in observe");
    // Kalman gain
    noalias(W) = prod(Xxz,SI);

    // Normalised innovation
    h.normalise(s = z, zp);
    noalias(s) -= zp;

    // Filter update
    noalias(x) += prod(W,s);
    RowMatrix WStemp(W.size1(), S.size2());
    noalias(X) -= prod_SPD(W,S, WStemp);

    return rcond;
}
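The mean and covariance sums above use the standard unscented weights: kappa/(n+kappa) for the centre sigma point and 1/(2(n+kappa)) for each of the remaining 2n points, with the common 1/(n+kappa) factor folded into the final division. A minimal scalar sketch of that weighting, illustrative rather than the Bayes++ API:

#include <cstddef>
#include <vector>

// Weighted mean of a scalar quantity propagated through 2n+1 sigma points:
// the centre point gets weight kappa/(n+kappa), the others 1/(2*(n+kappa)).
double unscented_mean(const std::vector<double>& sigma_points, double kappa, std::size_t n)
{
    const double x_kappa = double(n) + kappa;
    double mean = sigma_points[0] * kappa;		// centre point, weight kappa
    for (std::size_t i = 1; i < sigma_points.size(); ++i)
        mean += sigma_points[i] / 2.0;			// remaining 2n points, weight 1/2
    return mean / x_kappa;				// common factor 1/(n + kappa)
}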