Example #1
void LinearizedKF::Update(const ompl::base::State *belief, const typename ObservationModelMethod::ObservationType& obs,
                          const LinearSystem& ls, ompl::base::State *updatedState)
{

    using namespace arma;

    colvec innov = this->observationModel_->computeInnovation(belief, obs);

    if(!innov.n_rows || !innov.n_cols)
    {
        si_->copyState(updatedState, belief);
        return; // no innovation, so the prediction is returned unchanged
    }

    assert(innov.n_rows);
    assert(innov.n_cols);

    mat covPred = belief->as<StateType>()->getCovariance();

    mat rightMatrix = ls.getH() * covPred * trans(ls.getH()) + ls.getR();
    mat leftMatrix = covPred * trans(ls.getH());
    mat KalmanGain = solve(trans(rightMatrix), trans(leftMatrix));
    KalmanGain = KalmanGain.t();

    colvec xPredVec = belief->as<StateType>()->getArmaData();

    colvec xEstVec = xPredVec + KalmanGain*innov;

    updatedState->as<StateType>()->setXYYaw(xEstVec[0], xEstVec[1], xEstVec[2]);

    mat covEst = covPred - KalmanGain * ls.getH() * covPred;

    updatedState->as<StateType>()->setCovariance(covEst);

}
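
A note on the gain computation above: solving trans(rightMatrix) * X = trans(leftMatrix) and then transposing X yields the Kalman gain K = P H^T S^{-1} without forming an explicit inverse, which is cheaper and numerically better conditioned. A minimal standalone Armadillo sketch (with made-up P, H and R, not values from this filter) checking that the two formulations agree:

#include <armadillo>
#include <cassert>

int main()
{
    using namespace arma;

    arma_rng::set_seed(0);
    mat P = randu<mat>(3, 3);
    P = P * P.t() + eye<mat>(3, 3);   // symmetric positive definite "covariance"
    mat H = randu<mat>(2, 3);         // observation Jacobian
    mat R = 0.1 * eye<mat>(2, 2);     // measurement noise covariance

    mat S = H * P * H.t() + R;        // innovation covariance (rightMatrix above)
    mat PHt = P * H.t();              // leftMatrix above

    // Gain via a linear solve: S^T K^T = (P H^T)^T, then transpose back.
    mat K_solve = solve(S.t(), PHt.t()).t();

    // Gain via explicit inversion, which the solve avoids.
    mat K_inv = PHt * inv(S);

    assert(norm(K_solve - K_inv, "fro") < 1e-10);
    return 0;
}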
Example #2
arma::mat LinearizedKF::computeStationaryCovariance (const LinearSystem& ls)
{
    using namespace arma;

    mat H = ls.getH();
    mat M = ls.getM();
    mat R = ls.getR();

    mat Pprd;
    bool dareSolvable = dare(trans(ls.getA()), trans(H),
                             ls.getG() * ls.getQ() * trans(ls.getG()),
                             M * R * trans(M), Pprd);

    if(!dareSolvable)
    {
        OMPL_ERROR("Dare Unsolvable: The given system state is not stable/observable. Try a different state.");
        exit(1);
    }

    // symmetrize to remove numerical asymmetry
    Pprd = (Pprd + trans(Pprd)) / 2;

    mat Pest = Pprd - (Pprd * H.t()) * inv(H * Pprd * H.t() + M * R * M.t(), true) * trans(Pprd * H.t());

    // symmetrize to remove numerical asymmetry
    Pest = (Pest + trans(Pest)) / 2;

    return Pest;

}
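
For reference, the Pprd computed by dare above is a fixed point of the filter-form discrete algebraic Riccati equation, and Pest is the corresponding stationary posterior covariance. A sketch of the identities the code implements, assuming the usual convention (A, G, Q process terms; H, M, R observation terms):

P_{prd} = A P_{prd} A^{\top} - A P_{prd} H^{\top} \left( H P_{prd} H^{\top} + M R M^{\top} \right)^{-1} H P_{prd} A^{\top} + G Q G^{\top}

P_{est} = P_{prd} - P_{prd} H^{\top} \left( H P_{prd} H^{\top} + M R M^{\top} \right)^{-1} H P_{prd}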
Example #3
	virtual void onIterationStart() {
		// Only need to do this once
		rA = right->A( nextTime() );
		rb = right->b( nextTime() );
		lA =  left->A( nextTime() );
		lb =  left->b( nextTime() );

		// Evaluate the predicate using the previous iterand
		Vector predicate = rA * iterand(0) - rb;

		Vector compare = domain->zero();
		if(direct) {
			compare = lA * iterand(0) - lb;
		}

		// Initialize penalty matrix
		P.setZero();
		P.reserve( IntegerVector::Constant( domain->size(), 1 ) );

		// Initialize other matrix
		Q.setZero();
		Q.reserve( IntegerVector::Constant( domain->size(), 1 ) );

		// Build penalty matrix
		for(Index i = 0; i < domain->size(); ++i) {
			const bool c = Order()(scale * predicate(i), compare(i));
			if(c) { P.insert(i, i) = scale; }
			if(!direct || !c) { Q.insert(i, i) = 1.; }
		}
	}
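
The construction of P and Q above follows the standard Eigen sparse-matrix idiom for diagonal matrices: reserve one slot per row, then insert on the diagonal. A minimal self-contained sketch of the same pattern (hypothetical size, scale and predicate; the direct == true case is simplified to a plain else):

#include <Eigen/Dense>
#include <Eigen/Sparse>

int main()
{
    const int n = 5;
    const double scale = 1e4;

    Eigen::VectorXd predicate = Eigen::VectorXd::Random(n);
    Eigen::SparseMatrix<double> P(n, n), Q(n, n);

    // Reserve exactly one nonzero per row, as in onIterationStart().
    P.reserve(Eigen::VectorXi::Constant(n, 1));
    Q.reserve(Eigen::VectorXi::Constant(n, 1));

    for (int i = 0; i < n; ++i) {
        const bool c = scale * predicate(i) > 0.;  // stand-in for Order()(...)
        if (c) { P.insert(i, i) = scale; }
        else   { Q.insert(i, i) = 1.; }
    }
    P.makeCompressed();
    Q.makeCompressed();
    return 0;
}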
Example #4
void LinearizedKF::Predict(const ompl::base::State *belief, const ompl::control::Control* control,
                           const LinearSystem& ls, ompl::base::State *predictedState)
{
    using namespace arma;

    this->motionModel_->Evolve(belief, control, this->motionModel_->getZeroNoise(), predictedState);

    mat covPred = ls.getA() * belief->as<StateType>()->getCovariance() * trans(ls.getA()) +
                  ls.getG() * ls.getQ() * trans(ls.getG());

    predictedState->as<StateType>()->setCovariance(covPred);

}
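
The two statements above are the standard linearized Kalman filter prediction step; in the notation of the LinearSystem:

x^{-}_{k+1} = f(x_k, u_k, 0), \qquad P^{-}_{k+1} = A P_k A^{\top} + G Q G^{\top}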
Example #5
void
SpikeSolver::doSweeps(LinearSystem& ls, const int nSweeps)
{
  const MultiFieldMatrix& m = ls.getMatrix();
  MultiField& delta = ls.getDelta();
  const MultiField& b = ls.getB();
  MultiField& r = ls.getResidual();

  for(int i=0; i<nSweeps; i++)
  {
      m.spikeSolve(delta, b, r, _spikeStorage);
  }
}
Example #6
MFRPtr
SpikeSolver::solve(LinearSystem & ls)
{
  ls.getMatrix().computeResidual(ls.getDelta(),
                                 ls.getB(),
                                 ls.getResidual());
  MFRPtr rNorm0(ls.getResidual().getOneNorm());

#ifdef FVM_PARALLEL
  if (verbosity > 0 && MPI::COMM_WORLD.Get_rank() == 0)
    cout << "0: " << *rNorm0 << " procID = " << MPI::COMM_WORLD.Get_rank() << endl;
#endif

#ifndef FVM_PARALLEL
   if ( verbosity > 0 )
      cout << "0: " << *rNorm0 << endl;
#endif

  if (*rNorm0 < absoluteTolerance )
    return rNorm0;

  for(int i=1; i<nMaxIterations; i++)
  {
      doSweeps(ls,1);

      ls.getMatrix().computeResidual(ls.getDelta(),
                                     ls.getB(),
                                     ls.getResidual());
      MFRPtr rNorm(ls.getResidual().getOneNorm());
      MFRPtr normRatio((*rNorm)/(*rNorm0));

#ifndef FVM_PARALLEL
      if (verbosity > 0)
        cout << i << ": " << *rNorm << endl;
#endif


#ifdef FVM_PARALLEL
      if ((*rNorm < absoluteTolerance || *normRatio < relativeTolerance || i == nMaxIterations-1)
          && verbosity > 0 && MPI::COMM_WORLD.Get_rank() == 0)
          cout << i << ": procID = " << MPI::COMM_WORLD.Get_rank() << " " << *rNorm << endl;
#endif

      if (*rNorm < absoluteTolerance || *normRatio < relativeTolerance)
         break;
      
  }
  return rNorm0;
}
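
The loop above terminates on either an absolute or a relative residual test, both in the one-norm used by getOneNorm():

\| r_i \|_1 < \tau_{abs} \quad \text{or} \quad \| r_i \|_1 / \| r_0 \|_1 < \tau_{rel}

Note that the function returns the initial residual norm rNorm0, not the final one.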
Example #7
int main() {
	cout << "Linear equation systems. Gauss method.\nPodkopaev Anton, 345 group\n";
	
	
	CurrentType coefArr[] = {
		6.5126e-06, -8.0648e-03, 4.23528,
		5.9176e-03,    -0.80648, 1.46528,
		   0.87176,     0.79352, 0.91528
	};
	/*
	CurrentType coefArr[] = {
		1, 0.1,  0,
		0,   1,  0,
		0,   0,  1
	};
	*/

	Matrix<CurrentType> systemCoef(3, 3, coefArr);

	CurrentType constTermArr[] = {
		3.61628,
		1.52097,
		1.81150
	};
	/*
	CurrentType constTermArr[] = {
		1,
		2,
		3
	};
	*/

	Matrix<CurrentType> constTerm(3, 1, constTermArr);

	LinearSystem<CurrentType>* sys = LinearSystem<CurrentType>::createSystem(systemCoef, constTerm);
	
	//cout << (*sys);
	//cout << endl << endl;

	sys->gaussMethod();
	sys->gaussMethodWithChoosingInLine();

	delete sys; // deleting a null pointer is safe, so no guard is needed

	return 0;
}
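
The LinearSystem implementation is not shown here. A minimal self-contained sketch of Gaussian elimination with partial pivoting (one plausible reading of gaussMethodWithChoosingInLine), run on the same 3x3 system as above with plain vectors instead of the Matrix<CurrentType> class:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Solve A x = b by Gaussian elimination with partial pivoting.
// A is n x n in row-major order; A and b are taken by value and destroyed.
std::vector<double> gaussSolve(std::vector<double> A, std::vector<double> b)
{
    const std::size_t n = b.size();
    for (std::size_t k = 0; k < n; ++k) {
        // Choose the row with the largest pivot in column k and swap it up.
        std::size_t p = k;
        for (std::size_t i = k + 1; i < n; ++i)
            if (std::fabs(A[i * n + k]) > std::fabs(A[p * n + k])) p = i;
        for (std::size_t j = 0; j < n; ++j) std::swap(A[k * n + j], A[p * n + j]);
        std::swap(b[k], b[p]);

        // Eliminate column k below the pivot.
        for (std::size_t i = k + 1; i < n; ++i) {
            const double m = A[i * n + k] / A[k * n + k];
            for (std::size_t j = k; j < n; ++j) A[i * n + j] -= m * A[k * n + j];
            b[i] -= m * b[k];
        }
    }
    // Back substitution.
    std::vector<double> x(n);
    for (std::size_t i = n; i-- > 0; ) {
        double s = b[i];
        for (std::size_t j = i + 1; j < n; ++j) s -= A[i * n + j] * x[j];
        x[i] = s / A[i * n + i];
    }
    return x;
}

int main()
{
    std::vector<double> A = { 6.5126e-06, -8.0648e-03, 4.23528,
                              5.9176e-03, -0.80648,    1.46528,
                              0.87176,     0.79352,    0.91528 };
    std::vector<double> b = { 3.61628, 1.52097, 1.81150 };
    for (double xi : gaussSolve(A, b)) std::cout << xi << '\n';
    return 0;
}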
Example #8
    void TestChebyshevVsCG() throw (Exception)
    {
        unsigned num_nodes = 1331;
        DistributedVectorFactory factory(num_nodes);
        Vec parallel_layout = factory.CreateVec(2);

        unsigned cg_its;
        unsigned chebyshev_its;

        Timer::Reset();
        {
            Mat system_matrix;
            // Note that this test deadlocks if the file's not on the disk
            PetscTools::ReadPetscObject(system_matrix, "linalg/test/data/matrices/cube_6000elems_half_activated.mat", parallel_layout);

            Vec system_rhs;
            // Note that this test deadlocks if the file's not on the disk
            PetscTools::ReadPetscObject(system_rhs, "linalg/test/data/matrices/cube_6000elems_half_activated.vec", parallel_layout);

            LinearSystem ls = LinearSystem(system_rhs, system_matrix);

            ls.SetMatrixIsSymmetric();
            ls.SetAbsoluteTolerance(1e-9);
            ls.SetKspType("cg");
            ls.SetPcType("bjacobi");

            Vec solution = ls.Solve();

            cg_its = ls.GetNumIterations();

            PetscTools::Destroy(system_matrix);
            PetscTools::Destroy(system_rhs);
            PetscTools::Destroy(solution);
        }
        Timer::PrintAndReset("CG");

        {
            Mat system_matrix;
            // Note that this test deadlocks if the file's not on the disk
            PetscTools::ReadPetscObject(system_matrix, "linalg/test/data/matrices/cube_6000elems_half_activated.mat", parallel_layout);

            Vec system_rhs;
            // Note that this test deadlocks if the file's not on the disk
            PetscTools::ReadPetscObject(system_rhs, "linalg/test/data/matrices/cube_6000elems_half_activated.vec", parallel_layout);

            LinearSystem ls = LinearSystem(system_rhs, system_matrix);

            ls.SetMatrixIsSymmetric();
            ls.SetAbsoluteTolerance(1e-9);
            ls.SetKspType("chebychev");
            ls.SetPcType("bjacobi");

            Vec solution = ls.Solve();

            chebyshev_its = ls.GetNumIterations();

            PetscTools::Destroy(system_matrix);
            PetscTools::Destroy(system_rhs);
            PetscTools::Destroy(solution);
        }
        Timer::Print("Chebyshev");

        TS_ASSERT_LESS_THAN(cg_its, 15u); // Takes 14 iterations with 16 cores
        TS_ASSERT_LESS_THAN(chebyshev_its, 17u); // Takes 16 iterations with 16 cores

        PetscTools::Destroy(parallel_layout);
    }
Example #9
    void TestChebyshevAdaptiveVsNoAdaptive() throw (Exception)
    {
        unsigned num_nodes = 1331;
        DistributedVectorFactory factory(num_nodes);
        Vec parallel_layout = factory.CreateVec(2);

        // Solving with zero guess for coverage
        Vec zero_guess = factory.CreateVec(2);
        double zero = 0.0;
#if (PETSC_VERSION_MAJOR == 2 && PETSC_VERSION_MINOR == 2)
        VecSet(&zero, zero_guess);
#else
        VecSet(zero_guess, zero);
#endif

        Mat system_matrix;
        // Note that this test deadlocks if the file's not on the disk
        PetscTools::ReadPetscObject(system_matrix, "linalg/test/data/matrices/cube_6000elems_half_activated.mat", parallel_layout);

        Vec system_rhs;
        // Note that this test deadlocks if the file's not on the disk
        PetscTools::ReadPetscObject(system_rhs, "linalg/test/data/matrices/cube_6000elems_half_activated.vec", parallel_layout);

        // Make sure we are not inheriting a non-default number of iterations from previous test
        std::stringstream num_it_str;
        num_it_str << 1000;
        PetscOptionsSetValue("-ksp_max_it", num_it_str.str().c_str());

        try
        {
            LinearSystem ls = LinearSystem(system_rhs, system_matrix);

            ls.SetMatrixIsSymmetric();
            // Solve to relative convergence for coverage
            ls.SetRelativeTolerance(1e-6);
            ls.SetPcType("jacobi");
            ls.SetKspType("chebychev");
            ls.SetUseFixedNumberIterations(true, 64);

            // Solving with zero guess for coverage.
            Vec solution = ls.Solve(zero_guess);
            unsigned chebyshev_adaptive_its = ls.GetNumIterations();

            TS_ASSERT_EQUALS(chebyshev_adaptive_its, 40u);
            TS_ASSERT_DELTA(ls.mEigMin, 0.0124, 1e-4);
            TS_ASSERT_DELTA(ls.mEigMax, 1.8810, 1e-4);

            PetscTools::Destroy(solution);
        }
        catch (Exception& e)
        {
            if (e.GetShortMessage() == "Chebyshev with fixed number of iterations is known to be broken in PETSc <= 2.3.2")
            {
                WARNING(e.GetShortMessage());
            }
            else
            {
                TS_FAIL(e.GetShortMessage());
            }
        }


        // Make sure we are not inheriting a non-default number of iterations from previous test
        PetscOptionsSetValue("-ksp_max_it", num_it_str.str().c_str());
        {
            LinearSystem ls = LinearSystem(system_rhs, system_matrix);

            ls.SetMatrixIsSymmetric();
            ls.SetRelativeTolerance(1e-6);
            ls.SetPcType("jacobi");
            ls.SetKspType("chebychev");

            Vec solution = ls.Solve(zero_guess);
            unsigned chebyshev_no_adaptive_its = ls.GetNumIterations();

            TS_ASSERT_LESS_THAN(chebyshev_no_adaptive_its, 100u); // Normally 88, but 99 on maverick & natty
            TS_ASSERT_DELTA(ls.mEigMin, 0.0124, 1e-4);
            TS_ASSERT_DELTA(ls.mEigMax, 1.8841, 1e-4);

            PetscTools::Destroy(solution);
        }

        // Make sure we are not inheriting a non-default number of iterations from previous test
        PetscOptionsSetValue("-ksp_max_it", num_it_str.str().c_str());
        {
            LinearSystem ls = LinearSystem(system_rhs, system_matrix);

            ls.SetMatrixIsSymmetric();
            ls.SetRelativeTolerance(1e-6);
            ls.SetPcType("jacobi");
            ls.SetKspType("cg");
            Vec solution = ls.Solve(zero_guess);
            unsigned cg_its = ls.GetNumIterations();

            TS_ASSERT_EQUALS(cg_its, 40u);
            TS_ASSERT_EQUALS(ls.mEigMin, DBL_MAX);
            TS_ASSERT_EQUALS(ls.mEigMax, DBL_MIN);

            PetscTools::Destroy(solution);
        }

        PetscTools::Destroy(system_matrix);
        PetscTools::Destroy(system_rhs);

        PetscTools::Destroy(parallel_layout);
        PetscTools::Destroy(zero_guess);
    }
Example #10
template <typename Scalar>
bool ConjugateGradientSolver<Scalar>::solveWithoutPreconditioner(const LinearSystem<Scalar> &system,
                                            const GeneralizedVector<Scalar> &b,
                                            GeneralizedVector<Scalar> &x)
{
    //see b2. of the reference for notations
    this->iterations_used_ = 0;
    Scalar tolerance_sqr = (this->tolerance_)*(this->tolerance_);
    GeneralizedVector<Scalar> *r = b.clone(); //create a vector with the same type with b
    system.multiply(x,*r); //r = Ax
    (*r) *= -1;
    (*r) += b; //r = b - Ax
    system.filter(*r); //filter procedure to keep invariant of some components
    GeneralizedVector<Scalar> *d = r->clone();
    Scalar delta_0 = system.innerProduct(*r, *r); 
    Scalar delta = delta_0;
    GeneralizedVector<Scalar> *q = d->clone();
    GeneralizedVector<Scalar> *temp = d->clone();
    this->residual_magnitude_sqr_ = delta;
    while((this->iterations_used_ < this->max_iterations_) && (delta > tolerance_sqr*delta_0))
    {
        system.multiply(*d,*q);
        system.filter(*q);
        Scalar alpha = delta / (system.innerProduct(*d, *q));
        *temp = *d;
        *temp *= alpha;
        x += *temp; //x = x + alpha*d
        if((this->iterations_used_) % 50 == 0) // recompute the residual directly every 50 iterations to limit round-off drift
        {
            system.multiply(x,*r);
            (*r) *= -1;
            (*r) += b; //r = b - Ax
            system.filter(*r);
        }
        else
        {
            *temp = *q;
            *temp *= alpha;
            *r -= *temp; //r = r - alpha*q
        }
        Scalar delta_old = delta;
        delta = system.innerProduct(*r, *r);
        Scalar beta = delta/delta_old;
        *temp = *d;
        *temp *= beta;
        *d = *r;
        *d += *temp;  //d = r + beta*d
        system.filter(*d); 
        this->iterations_used_ = this->iterations_used_ + 1;
        this->residual_magnitude_sqr_ = delta;
    }
    delete r;
    delete d;
    delete q;
    delete temp;
    // report failure if the tolerance was not reached within the iteration cap
    bool status = (delta <= tolerance_sqr * delta_0);
    if (this->status_log_)
    {
        std::cout << "CG solver ";
        if (!status)
            std::cout << "did not converge";
        else
            std::cout << "converged";
        std::cout << " in " << this->iterations_used_ << " iterations, residual: " << this->residualMagnitude() << ".\n";
    }
    return status;
}
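
The loop body above implements the textbook conjugate gradient recurrences, with delta = r^T r and q = A d:

\alpha_k = \frac{r_k^{\top} r_k}{d_k^{\top} A d_k}, \quad x_{k+1} = x_k + \alpha_k d_k, \quad r_{k+1} = r_k - \alpha_k A d_k, \quad \beta_k = \frac{r_{k+1}^{\top} r_{k+1}}{r_k^{\top} r_k}, \quad d_{k+1} = r_{k+1} + \beta_k d_k

Recomputing the residual directly every 50 iterations guards against floating-point drift accumulating in the incremental r update.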
Example #11
    void TestBasicFunctionality() throw (Exception)
    {
        /*
         * We need to make sure here that the matrix is loaded with the appropriate parallel layout. PETSc's
         * default puts 1331 rows on each processor. That wouldn't be possible in a real bidomain simulation,
         * because it would imply that the equations for V_665 and Phi_e_665 are solved on different processors.
         */
        unsigned num_nodes = 1331;
        DistributedVectorFactory factory(num_nodes);
        Vec parallel_layout = factory.CreateVec(2);

        Mat system_matrix;
        PetscTools::ReadPetscObject(system_matrix, "linalg/test/data/matrices/cube_6000elems_half_activated.mat", parallel_layout);

        PetscTools::Destroy(parallel_layout);

        // Set rhs = A * [1 0 1 0 ... 1 0]'
        Vec one_zeros = factory.CreateVec(2);
        Vec rhs = factory.CreateVec(2);

        for (unsigned node_index=0; node_index<2*num_nodes; node_index+=2)
        {
            PetscVecTools::SetElement(one_zeros, node_index, 1.0);
            PetscVecTools::SetElement(one_zeros, node_index+1, 0.0);
        }
        PetscVecTools::Finalise(one_zeros);

        MatMult(system_matrix, one_zeros, rhs);
        PetscTools::Destroy(one_zeros);

        LinearSystem ls = LinearSystem(rhs, system_matrix);

        ls.SetAbsoluteTolerance(1e-9);
        ls.SetKspType("cg");
        ls.SetPcType("none");

        ls.AssembleFinalLinearSystem();

        Vec solution = ls.Solve();

        DistributedVector distributed_solution = factory.CreateDistributedVector(solution);
        DistributedVector::Stripe vm(distributed_solution, 0);
        DistributedVector::Stripe phi_e(distributed_solution, 1);

        for (DistributedVector::Iterator index = distributed_solution.Begin();
             index!= distributed_solution.End();
             ++index)
        {
            /*
             * Although we're trying to force the solution to be [1 0 ... 1 0], the system is singular and
             * therefore has infinitely many solutions. I (migb) have found that different preconditioners
             * lead to different solutions ([0.8 -0.2 ... 0.8 -0.2], [0.5 -0.5 ... 0.5 -0.5], ...)
             *
             * If we were using a PETSc null space, it would find the solution satisfying x'*v=0,
             * where v is the null space of the system (v = [1 1 ... 1])
             */
            TS_ASSERT_DELTA(vm[index] - phi_e[index], 1.0, 1e-6);
        }

        // Coverage (setting PC type after first solve)
        ls.SetPcType("blockdiagonal");

        PetscTools::Destroy(system_matrix);
        PetscTools::Destroy(rhs);
        PetscTools::Destroy(solution);

#if (PETSC_VERSION_MAJOR == 3 && PETSC_VERSION_MINOR <= 3) //PETSc 3.0 to PETSc 3.3
        //The PETSc developers changed this one, but later changed it back again!
        const PCType pc;
#else
        PCType pc;
#endif
        PC prec;
        KSPGetPC(ls.mKspSolver, &prec);
        PCGetType(prec, &pc);
        // Although we call it "blockdiagonal", PETSc considers this PC a generic SHELL preconditioner
        TS_ASSERT( strcmp(pc,"shell")==0 );

    }
Example #12
    void TestBetterThanNoPreconditioning()
    {
        unsigned num_nodes = 1331;
        DistributedVectorFactory factory(num_nodes);
        Vec parallel_layout = factory.CreateVec(2);

        unsigned point_jacobi_its;
        unsigned block_diag_its;

        Timer::Reset();
        {
            Mat system_matrix;
            // Note that this test deadlocks if the file's not on the disk
            PetscTools::ReadPetscObject(system_matrix, "linalg/test/data/matrices/cube_6000elems_half_activated.mat", parallel_layout);

            Vec system_rhs;
            // Note that this test deadlocks if the file's not on the disk
            PetscTools::ReadPetscObject(system_rhs, "linalg/test/data/matrices/cube_6000elems_half_activated.vec", parallel_layout);

            LinearSystem ls = LinearSystem(system_rhs, system_matrix);

            ls.SetAbsoluteTolerance(1e-9);
            ls.SetKspType("cg");
            ls.SetPcType("none");

            Vec solution = ls.Solve();

            point_jacobi_its = ls.GetNumIterations();

            PetscTools::Destroy(system_matrix);
            PetscTools::Destroy(system_rhs);
            PetscTools::Destroy(solution);
        }
        Timer::PrintAndReset("No preconditioning");

        {
            Mat system_matrix;
            // Note that this test deadlocks if the file's not on the disk
            PetscTools::ReadPetscObject(system_matrix, "linalg/test/data/matrices/cube_6000elems_half_activated.mat", parallel_layout);

            Vec system_rhs;
            // Note that this test deadlocks if the file's not on the disk
            PetscTools::ReadPetscObject(system_rhs, "linalg/test/data/matrices/cube_6000elems_half_activated.vec", parallel_layout);

            LinearSystem ls = LinearSystem(system_rhs, system_matrix);

            ls.SetAbsoluteTolerance(1e-9);
            ls.SetKspType("cg");
            ls.SetPcType("blockdiagonal");

            Vec solution = ls.Solve();

            block_diag_its = ls.GetNumIterations();

            // Coverage (setting PC type after using blockdiagonal solve)
            ls.SetPcType("blockdiagonal");

            PetscTools::Destroy(system_matrix);
            PetscTools::Destroy(system_rhs);
            PetscTools::Destroy(solution);
        }
        Timer::Print("Block diagonal preconditioner");

        std::cout << block_diag_its << " " << point_jacobi_its << std::endl;
        TS_ASSERT_LESS_THAN_EQUALS(block_diag_its, point_jacobi_its);

        PetscTools::Destroy(parallel_layout);
    }