Example #1
UKFilter::UKFilter(const FM::Vec& x0, const FM::SymMatrix& P0) :
        Kalman_state_filter(x0.size()),
        Unscented_scheme(x0.size()),
        z_p(Empty)
{
    UKFilter::x_size = x0.size();
    UKFilter::XX_size = 2*x0.size()+1;
    init(x0, P0);
}
Example #2
PFilter::PFilter(const FM::Vec& x0,
                 const FM::SymMatrix& P0,
                 std::size_t s_size) :
        Sample_state_filter (x0.size(), s_size),
        Kalman_state_filter(x0.size()),
        SIR_kalman_scheme(x0.size(), s_size, rnd),
        w(wir)
{
    PFilter::x_size = x0.size();
    init(x0, P0);
}
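For orientation, a minimal construction sketch for the two wrappers above (the two-state dimension and all numeric values are illustrative assumptions; FM::Vec and FM::SymMatrix are used only as they appear elsewhere in these examples, i.e. [] / (i,j) indexing and clear()):

    FM::Vec x0(2);
    x0[0] = 0.;  x0[1] = 1.;          // initial state estimate
    FM::SymMatrix P0(2,2);
    P0.clear();
    P0(0,0) = 1.;  P0(1,1) = 1.;      // initial covariance
    UKFilter ukf(x0, P0);             // unscented filter: 2*x_size+1 = 5 sigma points
    PFilter  pf (x0, P0, 1000);       // particle filter with 1000 samples (uses its internal rnd)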
Example #3
Bayes_base::Float
 CI_scheme::observe_innovation (Linrz_correlated_observe_model& h, const FM::Vec& s)
/* Correlated innovation observe
 */
{
	const Float one = 1;
						// size consistency, z to model
	if (s.size() != h.Z.size1())
		error (Logic_exception("observation and model size inconsistent"));
	observe_size (s.size());	// dynamic sizing

						// Linear conditioning for omega
	SymMatrix invZ(h.Z.size1(),h.Z.size2());
	Float rcond = UdUinversePD (invZ, h.Z);
	rclimit.check_PSD(rcond, "Z not PSD in observe");

	Matrix HTinvZ (prod(trans(h.Hx), invZ));
	SymMatrix HTinvZH (prod(HTinvZ, h.Hx));

	SymMatrix invX(X.size1(),X.size2());
	rcond = UdUinversePD (invX, X);
	rclimit.check_PD(rcond, "X not PD in observe");


						// find omega
	Float omega = Omega(invX, HTinvZH, X);

						// calculate predicted innovation
	Matrix XHT (prod(X, trans(h.Hx)));
	Matrix HXHT (prod(h.Hx, XHT));
	S = HXHT * (one-omega) + h.Z * omega;

						// inverse innovation covariance
	rcond = UdUinversePD (SI, S);
	rclimit.check_PD(rcond, "S not PD in observe");

	Matrix K (prod(XHT*(one-omega), SI));

						// state update
	noalias(x) += prod(K, s);
						// inverse covariance
	invX *= omega;						
	noalias(invX) += HTinvZH*(one-omega);
						// covariance
	rcond = UdUinversePD (X, invX);
	rclimit.check_PD(rcond, "inverse covariance not PD in observe");
	return rcond;
}
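Written out, a sketch in LaTeX of the covariance-intersection update the code performs (same symbols as the code: \omega is the weight returned by Omega(), H_x the observation Jacobian h.Hx, Z the observation noise h.Z, s the innovation):

    X_+^{-1} = \omega\,X^{-1} + (1-\omega)\,H_x^\top Z^{-1} H_x
    S   = (1-\omega)\,H_x X H_x^\top + \omega\,Z
    K   = (1-\omega)\,X H_x^\top S^{-1}
    x_+ = x + K\,s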
Example #4
Simple_linear_predict_model::Simple_linear_predict_model (const FM::Matrix& Fx_init, const FM::Matrix& G_init, const FM::Vec& q_init) :
// Precondition: Fx, q and G are conformantly dimensioned (not checked)
	Linear_predict_model (Fx_init.size1(), q_init.size())
{
	Fx = Fx_init;
	G = G_init;
	q = q_init;
}
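As an illustration of how such a model might be filled in (a sketch only; the constant-velocity state, dt and the noise variance are assumed values, not taken from the library's own examples):

    // Constant-velocity model, state [position, velocity], sample interval dt
    const double dt = 0.1;
    FM::Matrix Fx(2,2);               // state transition
    Fx(0,0) = 1.; Fx(0,1) = dt;
    Fx(1,0) = 0.; Fx(1,1) = 1.;
    FM::Matrix G(2,1);                // acceleration noise coupling
    G(0,0) = dt*dt/2.; G(1,0) = dt;
    FM::Vec q(1);
    q[0] = 0.01;                      // process noise variance
    Simple_linear_predict_model f(Fx, G, q);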
Example #5
Simple_addative_predict_model::Simple_addative_predict_model (State_function f_init, const FM::Matrix& G_init, const FM::Vec& q_init) :
// Precondition: G, q are conformantly dimensioned (not checked)
	Addative_predict_model (G_init.size1(), q_init.size()),
	ff(f_init)
{
	G = G_init;
	q = q_init;
}
Example #6
void PFilter::predict_observation(Correlated_additive_observe_model& observe_model, FM::Vec& z_pred, FM::SymMatrix& R_pred)
{
    z_pred.clear();   // mean
    const std::size_t nSamples = S.size2();
    for (std::size_t i = 0; i != nSamples; ++i) {
        FM::ColMatrix::Column Si(S,i);
        z_pred.plus_assign (observe_model.h(Si));
    }
    z_pred /= Float(nSamples);

    R_pred.clear();   // Covariance
    for (std::size_t i = 0; i != nSamples; ++i) {
        FM::ColMatrix::Column Si(S,i);
        R_pred.plus_assign (FM::outer_prod(observe_model.h(Si)-z_pred, observe_model.h(Si)-z_pred));
    }
    R_pred /= Float(nSamples);
}
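In equation form, with N = S.size2() samples s_i, the two loops above compute the sample mean and sample covariance of the predicted observations:

    \hat z = \frac{1}{N}\sum_{i=1}^{N} h(s_i), \qquad
    R = \frac{1}{N}\sum_{i=1}^{N} \bigl(h(s_i)-\hat z\bigr)\bigl(h(s_i)-\hat z\bigr)^\top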
Example #7
Simple_linrz_predict_model::Simple_linrz_predict_model (State_function f_init, const FM::Matrix& Fx_init, const FM::Matrix& G_init, const FM::Vec& q_init) :
// Precondition: Fx, G, q are conformantly dimensioned (not checked)
	Linrz_predict_model (Fx_init.size1(), q_init.size()),
	ff(f_init)
{
	Fx = Fx_init;
	G = G_init;
	q = q_init;
}
Example #8
void Kalman_SLAM::observe_new( unsigned feature, const Feature_observe_inverse& fom, const FM::Vec& z )
// fom: must have the special form required for SLAM::observe_new
{
    // size consistency, single state feature
    if (fom.Hx.size1() != 1)
        error (BF::Logic_exception("observation and model size inconsistent"));

    // make new filter with additional (uninitialized) feature state
    if (feature >= nM)
    {
        nM = feature+1;
        Kalman_filter_generator::Filter_type* nf = fgenerator.generate(nL+nM);
        FM::noalias(nf->x.sub_range(0,full->x.size())) = full->x;
        FM::noalias(nf->X.sub_matrix(0,full->x.size(),0,full->x.size())) = full->X;

        fgenerator.dispose(full);
        full = nf;
    }
    // build augmented location and observation
    FM::Vec sz(nL+z.size());
    sz.sub_range(0,nL) = full->x.sub_range(0,nL);
    sz.sub_range(nL,nL+z.size() )= z;

    // TODO use named references rather than explicit Ha Hb
    FM::Matrix Ha (fom.Hx.sub_matrix(0,1, 0,nL) );
    FM::Matrix Hb (fom.Hx.sub_matrix(0,1, nL,nL+z.size()) );
    FM::Matrix tempHa (1,nL);
    FM::Matrix tempHb (1,sz.size());

    // feature covariance with existing location and features
    // X+ = [0 Ha] X [0 Ha]' + Hb Z Hb'
    // - zero existing feature covariance
    zero( full->X.sub_matrix(0,full->X.size1(), nL+feature,nL+feature+1) );
    full->X.sub_matrix(nL+feature,nL+feature+1,0,nL+nM) = FM::prod(Ha,full->X.sub_matrix(0,nL, 0,nL+nM) );
    // feature state and variance
    full->x[nL+feature] = fom.h(sz)[0];
    full->X(nL+feature,nL+feature) = ( FM::prod_SPD(Ha,full->X.sub_matrix(0,nL, 0,nL),tempHa) +
                                       FM::prod_SPD(Hb,fom.Zv,tempHb)
                                     ) (0,0);

    full->init ();
}
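Sketched in LaTeX, the feature initialisation performed above (H_a and H_b are the location and observation blocks of the inverse-observe Jacobian fom.Hx, X_{ll} the location covariance, Z the observation noise fom.Zv, x_l the location states):

    x_f = h(x_l, z)
    X_{f\cdot} = H_a\,X_{l\cdot}                       % cross-covariance with location and existing features
    X_{ff} = H_a X_{ll} H_a^\top + H_b Z H_b^\top      % new feature variance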
Example #9
void Kalman_SLAM::init_kalman (const FM::Vec& x, const FM::SymMatrix& X)
{
    // TODO maintain map states
    nL = x.size();
    nM = 0;
    if (loc) fgenerator.dispose (loc);
    if (full) fgenerator.dispose (full);
    // generate a location filter for prediction
    loc = fgenerator.generate(nL);
    // generate full filter
    full = fgenerator.generate(nL);
    // initialise location states
    full->x.sub_range(0,nL) = x;
    full->X.sub_matrix(0,nL,0,nL) = X;
    full->init();
}
Example #10
Bayes_base::Float
 CI_scheme::observe_innovation (Linrz_uncorrelated_observe_model& h, const FM::Vec& s)
/* Uncorrelated innovation observe
 * Adapts the uncorrelated observation noise model to its correlated
 * equivalent and delegates to the correlated observe_innovation
 */
{
						// ISSUE: Implement simplified uncorrelated noise equations
	std::size_t z_size = s.size();
	SymMatrix Z(z_size,z_size);

	Adapted_Linrz_correlated_observe_model hh(h);
	return observe_innovation (hh, s);
}
Example #11
Bayes_base::Float Information_root_scheme::observe_innovation (Linrz_uncorrelated_observe_model& h, const FM::Vec& s)
/* Extended linrz uncorrelated observe
 * Precondition:
 *		r(k+1|k),R(k+1|k)
 * Postcondition:
 *		r(k+1|k+1),R(k+1|k+1)
 *
 * Uses LAPACK geqrf for QR decomposition (without PIVOTING)
 * ISSUE correctness of linrz form needs validation
 * ISSUE Efficiency. Product of Zir can be simplified
 */
{
	const std::size_t x_size = x.size();
	const std::size_t z_size = s.size();
						// Size consistency, z to model
	if (z_size != h.Zv.size())
		error (Logic_exception("observation and model size inconsistent"));

						// Require Inverse of Root of uncorrelated observe noise
	DiagMatrix Zir(z_size,z_size);
	Zir.clear();
	for (std::size_t i = 0; i < z_size; ++i)
	{
		Zir(i,i) = 1 / std::sqrt(h.Zv[i]);
	}
						// Form Augmented matrix for factorisation
	DenseColMatrix A(x_size+z_size, x_size+1);	// Column major required for LAPACK; this property is also relied on by the indexing below
	A.sub_matrix(0,x_size, 0,x_size) .assign(R);
	A.sub_matrix(x_size,x_size+z_size, 0,x_size) .assign (prod(Zir, h.Hx));
	A.sub_column(0,x_size, x_size) .assign (r);
	A.sub_column(x_size,x_size+z_size, x_size) .assign (prod(Zir, s+prod(h.Hx,x)));

						// Calculate factorisation so we have an upper triangular R
	DenseVec tau(x_size+1);
	int info = LAPACK::geqrf (A, tau);
	if (info != 0)
			error (Numeric_exception("Observe no QR factor"));
						// Extract the roots, junk in strict lower triangle
	noalias(R) = UpperTri( A.sub_matrix(0,x_size, 0,x_size) );
	noalias(r) = A.sub_column(0,x_size, x_size);

	return UCrcond(R);	// compute rcond of result
}
Example #12
Bayes_base::Float Information_root_scheme::observe_innovation (Linrz_correlated_observe_model& h, const FM::Vec& s)
/* Extended linrz correlated observe
 * Precondition:
 *		r(k+1|k),R(k+1|k)
 * Postcondition:
 *		r(k+1|k+1),R(k+1|k+1)
 *
 * Uses LAPACK geqrf for QR decomposition (without PIVOTING)
 * ISSUE correctness of linrz form needs validation
 */
{
    const std::size_t x_size = x.size();
    const std::size_t z_size = s.size();
    // Size consistency, z to model
    if (z_size != h.Z.size1())
        error (Logic_exception("observation and model size inconsistent"));

    // Require Inverse of Root of correlated observe noise
    UTriMatrix Zir(z_size,z_size);
    Float rcond = UCfactor (Zir, h.Z);
    rclimit.check_PD(rcond, "Z not PD");
    bool singular = UTinverse (Zir);
    assert (!singular);
    (void)singular;
    // Form Augmented matrix for factorisation
    DenseColMatrix A(x_size+z_size, x_size+1);	// Column major required for LAPACK; this property is also relied on by the indexing below
    A.sub_matrix(0,x_size, 0,x_size) .assign (R);
    A.sub_matrix(x_size,x_size+z_size, 0,x_size) .assign (prod(Zir, h.Hx));
    A.sub_column(0,x_size, x_size) .assign (r);
    A.sub_column(x_size,x_size+z_size, x_size) .assign (prod(Zir, s+prod(h.Hx,x)));

    // Calculate factorisation so we have an upper triangular R
    DenseVec tau(x_size+1);
    int info = LAPACK::geqrf (A, tau);
    if (info != 0)
        error (Numeric_exception("Observe no QR factor"));
    // Extract the roots, junk in strict lower triangle
    noalias(R) = UpperTri( A.sub_matrix(0,x_size, 0,x_size) );
    noalias(r) = A.sub_column(0,x_size, x_size);

    return UCrcond(R);	// compute rcond of result
}
Example #13
Bayes_base::Float
 CartesianModel3D::Likelihood_correlated::L(const Correlated_additive_observe_model& model, const FM::Vec& z, const FM::Vec& zp) const
/*
 * Definition of likelihood given an additive Gaussian observation model:
 *  p(z|x) = exp(-0.5*(z-h(x))'*inv(Z)*(z-h(x))) / sqrt(2pi^nz*det(Z));
 *  L(x), the likelihood, does not depend on the sqrt(2pi^nz) factor for constant z size
 * Precond: Observation information: z, Z_inv, logdetZ
 */
{
   if (!zset)
      Bayes_base::error (Logic_exception ("BGSubModel used without Lz set"));
               // Normalised innovation
   zInnov = z;
   model.normalise (zInnov, zp);
   FM::noalias(zInnov) -= zp;

   Float logL = scaled_vector_square(zInnov, Z_inv);
   using namespace std;
   return exp(Float(-0.5)*(logL + z.size()*log(2*M_PI) + logdetZ));   // normalized likelihood
}
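Equivalently, with normalised innovation \tilde z = z - z_p and n_z = z.size(), the value returned is the fully normalised Gaussian density quoted in the comment:

    L = \exp\!\Bigl(-\tfrac12\bigl(\tilde z^\top Z^{-1}\tilde z + n_z\log 2\pi + \log\det Z\bigr)\Bigr)
      = \frac{\exp\bigl(-\tfrac12\,\tilde z^\top Z^{-1}\tilde z\bigr)}{\sqrt{(2\pi)^{n_z}\det Z}}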
Example #14
UD_scheme::Float
 UD_scheme::predictGq (const Matrix& Fx, const Matrix& G, const FM::Vec& q)
/* MWG-S prediction from Bierman  p.132
 *  q can have order less than x and a matching G so GqG' has order of x
 * Precond:
 *  UD
 * Postcond:
 *  UD
 *
 * Return:
 *		reciprocal condition number, -1 if negative, 0 if semi-definite (including zero)
 */
{
	std::size_t i,j,k;
	const std::size_t n = x.size();
	const std::size_t Nq = q.size();
	const std::size_t N = n+Nq;
	Float e;
					// Check preallocated space for q size
	if (Nq > q_max)
		error (Logic_exception("Predict model q larger than preallocated space"));

	if (n > 0)		// Simplify reverse loop termination
	{
						// Augment d with q, UD with G
		for (i = 0; i < Nq; ++i)		// 0..Nq-1
		{
			d[i+n] = q[i];
		}
		for (j = 0; j < n; ++j)		// 0..n-1
		{
			Matrix::Row UDj(UD,j);
			Matrix::const_Row  Gj(G,j);
			for (i = 0; i < Nq; ++i)		// 0..Nq-1
				UDj[i+n] = Gj[i];
		}

						// U=Fx*U and diagonals retrieved
		for (j = n-1; j > 0; --j)		// n-1..1
		{
						// Prepare d(0)..d(j) as temporary
			for (i = 0; i <= j; ++i)	// 0..j
				d[i] = Float(UD(i,j));	// ISSUE mixed type proxy assignment

						// Lower triangle of UD is implicitly empty
			for (i = 0; i < n; ++i) 	// 0..n-1
			{
				Matrix::Row UDi(UD,i);
				Matrix::const_Row Fxi(Fx,i);
				UDi[j] = Fxi[j];
				for (k = 0; k < j; ++k)	// 0..j-1
					UDi[j] += Fxi[k] * d[k];
			}
		}
		d[0] = Float(UD(0,0));	// ISSUE mixed type proxy assignment

						//  Complete U = Fx*U
		for (j = 0; j < n; ++j)			// 0..n-1
		{
			UD(j,0) = Fx(j,0);
		}

						// The MWG-S algorithm on UD transpose
		j = n-1;
		do {							// n-1..0
			Matrix::Row UDj(UD,j);
			e = 0;
			for (k = 0; k < N; ++k)		// 0..N-1
			{
				v[k] = Float(UDj[k]);	// ISSUE mixed type proxy assignment
				dv[k] = d[k] * v[k];
				e += v[k] * dv[k];
			}
			// Check diagonal element
			if (e > 0)
			{
				// Positive definite
				UDj[j] = e;

				Float diaginv = 1 / e;
				for (k = 0; k < j; ++k)	// 0..j-1
				{
					Matrix::Row UDk(UD,k);
					e = 0;
					for (i = 0; i < N; ++i)	// 0..N-1
						e += UDk[i] * dv[i];
					e *= diaginv;
					UDj[k] = e;

					for (i = 0; i < N; ++i)	// 0..N-1
						UDk[i] -= e * v[i];
				}
			}//PD
			else if (e == 0)
			{
				// Possibly semi-definite, check not negative
				UDj[j] = e;

				// 1 / e is infinite
				for (k = 0; k < j; ++k)	// 0..j-1
				{
					Matrix::Row UDk(UD,k);
					for (i = 0; i < N; ++i)	// 0..N-1
					{
						e = UDk[i] * dv[i];
						if (e != 0)
							goto Negative;
					}
					// UD(j,k) unaffected
				}
			}//PD
			else
			{
				// Negative
				goto Negative;
			}
		} while (j-- > 0); //MWG-S loop

						// Transpose and Zero lower triangle
		for (j = 1; j < n; ++j)			// 1..n-1
		{
			Matrix::Row UDj(UD,j);
			for (i = 0; i < j; ++i)
			{
				UD(i,j) = UDj[i];
				UDj[i] = 0;			// Zeroing unnecessary as lower only used as a scratch
			}
		}

	}

	// Estimate the reciprocal condition number from upper triangular part
	return UdUrcond(UD,n);

Negative:
	return -1;
}
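In matrix terms, a sketch of what the routine factorises: with the augmented matrix W = [F_x U \; G] and weights \bar D = \mathrm{diag}(D, \mathrm{diag}(q)),

    X_+ = F_x\,U D U^\top F_x^\top + G\,\mathrm{diag}(q)\,G^\top = W\,\bar D\,W^\top

and the MWG-S orthogonalisation of the rows of W with respect to \bar D produces the new factors, X_+ = U_+ D_+ U_+^\top.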
Example #15
Bayes_base::Float Unscented_scheme::observe (Correlated_additive_observe_model& h, const FM::Vec& z)
/* Observation fusion
 *  Pre : x,X
 *  Post: x,X is PSD
 */
{
    std::size_t z_size = z.size();
    ColMatrix zXX (z_size, 2*x_size+1);
    Vec zp(z_size);
    SymMatrix Xzz(z_size,z_size);
    Matrix Xxz(x_size,z_size);
    Matrix W(x_size,z_size);

    observe_size (z.size());	// Dynamic sizing

    // Create Unscented distribution
    kappa = observe_Kappa(x_size);
    Float x_kappa = Float(x_size) + kappa;
    unscented (XX, x, X, x_kappa);

    // Predict points of XX using supplied observation model
    {
        Vec zXXi(z_size), zXX0(z_size);
        zXX0 = h.h( column(XX,0) );
        column(zXX,0) = zXX0;
        for (std::size_t i = 1; i < XX.size2(); ++i) {
            zXXi = h.h( column(XX,i) );
            // Normalise relative to zXX0
            h.normalise (zXXi, zXX0);
            column(zXX,i) = zXXi;
        }
    }

    // Mean of predicted distribution: zp
    noalias(zp) = column(zXX,0) * kappa;
    for (std::size_t i = 1; i < zXX.size2(); ++i) {
        noalias(zp) += column(zXX,i) / Float(2); // ISSUE uBlas may not be able to promote integer 2
    }
    zp /= x_kappa;

    // Covariance of observation predict: Xzz
    // Subtract mean from each point in zXX
    for (std::size_t i = 0; i < XX_size; ++i) {
        column(zXX,i).minus_assign (zp);
    }
    // Center point, premult here by 2 for efficiency
    {
        ColMatrix::Column zXX0 = column(zXX,0);
        noalias(Xzz) = FM::outer_prod(zXX0, zXX0);
        Xzz *= 2*kappa;
    }
    // Remaining Unscented points
    for (std::size_t i = 1; i < zXX.size2(); ++i) {
        ColMatrix::Column zXXi = column(zXX,i);
        noalias(Xzz) += FM::outer_prod(zXXi, zXXi);
    }
    Xzz /= 2*x_kappa;

    // Correlation of state with observation: Xxz
    // Center point, premult here by 2 for efficiency
    {
        noalias(Xxz) = FM::outer_prod(column(XX,0) - x, column(zXX,0));
        Xxz *= 2*kappa;
    }
    // Remaining Unscented points
    for (std::size_t i = 1; i < zXX.size2(); ++i) {
        noalias(Xxz) += FM::outer_prod(column(XX,i) - x, column(zXX,i));
    }
    Xxz /= 2* (Float(x_size) + kappa);

    // Innovation covariance
    S = Xzz;
    noalias(S) += h.Z;
    // Inverse innovation covariance
    Float rcond = UdUinversePD (SI, S);
    rclimit.check_PD(rcond, "S not PD in observe");
    // Kalman gain
    noalias(W) = prod(Xxz,SI);

    // Normalised innovation
    h.normalise(s = z, zp);
    noalias(s) -= zp;

    // Filter update
    noalias(x) += prod(W,s);
    RowMatrix WStemp(W.size1(), S.size2());
    noalias(X) -= prod_SPD(W,S, WStemp);

    return rcond;
}
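The kappa scaling above corresponds to the standard unscented weights. With n = x_size, sigma points \mathcal{X}_i, transformed points \mathcal{Z}_i = h(\mathcal{X}_i) and \kappa = observe_Kappa(n), a sketch of the update (the code stores the gain in W):

    w_0 = \frac{\kappa}{n+\kappa}, \qquad w_i = \frac{1}{2(n+\kappa)} \quad (i = 1,\dots,2n)
    z_p = \sum_i w_i\,\mathcal{Z}_i, \qquad
    X_{zz} = \sum_i w_i (\mathcal{Z}_i - z_p)(\mathcal{Z}_i - z_p)^\top, \qquad
    X_{xz} = \sum_i w_i (\mathcal{X}_i - x)(\mathcal{Z}_i - z_p)^\top
    S = X_{zz} + Z, \qquad K = X_{xz} S^{-1}, \qquad x_+ = x + K\,s, \qquad X_+ = X - K S K^\top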
Example #16
void UKFilter::predict_observation(Correlated_additive_observe_model& observe_model, FM::Vec& z_pred, FM::SymMatrix& R_pred)
{
    std::size_t z_size = z_pred.size();
    ColMatrix zXX(z_size, 2*x_size+1);
    SymMatrix Xzz(z_size,z_size);
    Matrix Xxz(x_size,z_size);

    z_p.resize(z_size);
    observe_size (z_size);  // Dynamic sizing

    // Create unscented distribution
    kappa = observe_Kappa(x_size);
    Float x_kappa = Float(x_size) + kappa;
    unscented (XX, x, X, x_kappa);

    // Predict points of XX using supplied observation model
    {
        Vec zXXi(z_size), zXX0(z_size);
        zXX0 = observe_model.h( column(XX,0) );
        column(zXX,0) = zXX0;
        for (std::size_t i = 1; i < XX.size2(); ++i) {
            zXXi = observe_model.h( column(XX,i) );
            // Normalise relative to zXX0
            observe_model.normalise (zXXi, zXX0);
            column(zXX,i) = zXXi;
        }
    }

    // Mean of predicted distribution: z_p
    noalias(z_p) = column(zXX,0) * kappa;
    for (std::size_t i = 1; i < zXX.size2(); ++i) {
        noalias(z_p) += column(zXX,i) / Float(2); // ISSUE uBlas may not be able to promote integer 2
    }
    z_p /= x_kappa;
    z_pred = z_p;

    // Covariance of observation predict: Xzz
    // Subtract mean from each point in zXX
    for (std::size_t i = 0; i < XX_size; ++i) {
        column(zXX,i).minus_assign (z_p);
    }
    // Center point, premult here by 2 for efficiency
    {
        ColMatrix::Column zXX0 = column(zXX,0);
        noalias(Xzz) = FM::outer_prod(zXX0, zXX0);
        Xzz *= 2*kappa;
    }
    // Remaining unscented points
    for (std::size_t i = 1; i < zXX.size2(); ++i) {
        ColMatrix::Column zXXi = column(zXX,i);
        noalias(Xzz) += FM::outer_prod(zXXi, zXXi);
    }
    Xzz /= 2*x_kappa;


    /** Fix for non-positive semidefinite covariance **/
    {
        ColMatrix::Column zXX0 = column(zXX,0);      // mean z_p already subtracted above
        noalias(Xzz) += FM::outer_prod(zXX0, zXX0);  // modified covariance about mean
    }


    R_pred = Xzz;

//    // Correlation of state with observation: Xxz
//    // Center point, premult here by 2 for efficiency
//    {
//       noalias(Xxz) = FM::outer_prod(column(XX,0) - x, column(zXX,0));
//       Xxz *= 2*kappa;
//    }
//    // Remaining unscented points
//    for (std::size_t i = 1; i < zXX.size2(); ++i) {
//       noalias(Xxz) += FM::outer_prod(column(XX,i) - x, column(zXX,i));
//    }
//    Xxz /= 2* (Float(x_size) + kappa);
}
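A call sketch (MyObserveModel and z_size are illustrative assumptions, not part of the library; ukf is an already constructed UKFilter as in the sketch after Example #2):

    MyObserveModel obs(z_size);               // hypothetical model implementing h() and carrying Z
    FM::Vec z_pred(z_size);
    FM::SymMatrix R_pred(z_size, z_size);
    ukf.predict_observation(obs, z_pred, R_pred);
    // z_pred now holds the unscented mean of h(x), R_pred the predicted observation covariance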