Code example #1
  virtual void test() {
    std::vector<std::pair<boost::numeric::ublas::vector<double>, boost::numeric::ublas::vector<double> > > train = DataSetMNIST::get_train();

    train.resize(100);

    png::image<png::rgb_pixel> image(28 * train.size(), 28);
    for (std::size_t z = 0; z < train.size(); z++) {
      boost::numeric::ublas::vector<double> f = f_shallow(train[z].first);

      for (std::size_t y = 0; y < 28; ++y) {
        for (std::size_t x = 0; x < 28; ++x) {
          double c = f[y * 28 + x] + 1;                     // shift [-1, 1] to [0, 2]
          c *= 127.5;                                       // scale to [0, 255]
          unsigned char b = static_cast<unsigned char>(c);  // png expects byte values
          image[y][x + z * 28] = png::rgb_pixel(b, b, b);
        }
      }
    }
    image.write("badass.png");

    for (std::size_t i = 0; i < train.size(); i++) {
      boost::numeric::ublas::vector<double> t0 = m_network->f(train[i].first);
      boost::numeric::ublas::vector<double> t1 = f_shallow(train[i].first);
      std::cout << "final diff " << norm_1(t0 - t1) << std::endl;

      for (std::size_t j = 0; j < t0.size(); j++) {
        // std::cout << train[i].first[j] << " " << t0[j] << " " << t1[j] << std::endl;
      }
    }
  }
Code example #2
File: Vector.cpp Project: bobzabcik/OpenStudio
 bool operator==(const Vector& lhs, const Vector& rhs)
 {
   bool result = false;
   if (lhs.size() == rhs.size()){
     result = (norm_1(lhs-rhs) == 0);
   }
   return result;
 }
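For reference, boost::numeric::ublas::norm_1 returns the sum of the absolute values of the elements, so norm_1(lhs - rhs) == 0 holds exactly when the two vectors match element by element. A minimal standalone sketch (not taken from the OpenStudio sources):

#include <boost/numeric/ublas/vector.hpp>
#include <iostream>

int main() {
    namespace ublas = boost::numeric::ublas;
    ublas::vector<double> a(3), b(3);
    a(0) = 1.0; a(1) = -2.0; a(2) = 3.0;   // two identical vectors
    b(0) = 1.0; b(1) = -2.0; b(2) = 3.0;
    // norm_1 sums absolute values, so the difference of equal vectors has 1-norm 0.
    std::cout << ublas::norm_1(a - b) << std::endl;   // prints 0
    return 0;
}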
Code example #3
File: matrix.cpp Project: wzhang021484/CSC431Project
matrix exp(matrix x, double ap, double rp,int ns)
{
	matrix t = my_identity(x.cols);
	matrix s = my_identity(x.cols);

	for (int k=1; k<ns; k++)   // start at k=1: the k=0 identity term is already stored in s, and t*x/0 would divide by zero
	{
		t = t*x/k;   // next Taylor term x^k/k!
		s = s + t;   // add it to the partial sum
		if (norm_1(t)<max(ap,norm_1(s)*rp))
		{
			return s;
		}
	}

	cout << "error! exp: no convergence!\n";
	exit(-1);
}
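The loop above evaluates the truncated Taylor series exp(x) = sum_k x^k / k!, stopping once the 1-norm of the latest term drops below max(ap, rp * ||s||_1). A scalar sketch of the same stopping rule (illustrative only; exp_series is a hypothetical name, and the matrix, my_identity and norm_1 used above belong to the CSC431 project):

#include <algorithm>
#include <cmath>
#include <cstdio>

double exp_series(double x, double ap = 1e-12, double rp = 1e-12, int ns = 100) {
    double t = 1.0;   // current term x^k / k!, starting with the k = 0 term
    double s = 1.0;   // partial sum
    for (int k = 1; k < ns; ++k) {
        t *= x / k;   // next term
        s += t;       // add it
        if (std::fabs(t) < std::max(ap, std::fabs(s) * rp))
            return s; // converged: the term is negligible both absolutely and relatively
    }
    return s;         // ran out of terms (the matrix version reports "no convergence" here)
}

int main() {
    std::printf("%.12f  %.12f\n", exp_series(1.5), std::exp(1.5));
    return 0;
}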
Code example #4
c_vector<double, SPACE_DIM+1> Element<ELEMENT_DIM, SPACE_DIM>::CalculateInterpolationWeightsWithProjection(const ChastePoint<SPACE_DIM>& rTestPoint)
{
    //Can only test if it's a tetrahedral mesh in 3d, triangles in 2d...
    assert(ELEMENT_DIM == SPACE_DIM);

    c_vector<double, SPACE_DIM+1> weights = CalculateInterpolationWeights(rTestPoint);

    // Check for negative weights and set them to zero.
    bool negative_weight = false;

    for (unsigned i=0; i<=SPACE_DIM; i++)
    {
        if (weights[i] < 0.0)
        {
            weights[i] = 0.0;

            negative_weight = true;
        }
    }

    if (negative_weight == false)
    {
        // If there are no negative weights, there is nothing to do.
        return weights;
    }

    // Renormalise so that all weights add to 1.0.

    // Note that all elements of weights are now non-negative and so the l1-norm (sum of magnitudes) is equivalent to the sum of the elements of the vector
    double sum = norm_1 (weights);

    //l1-norm ought to be above 1 (because we scrubbed negative weights)
    //However, if we scrubbed weights that were the size of the machine precision then we might be close to one (even less than 1).
    assert( sum + DBL_EPSILON >= 1.0);

    //We might skip this division when sum ~= 1
    weights = weights/sum;

    return weights;
}
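Once the negative weights have been clamped to zero, every entry is non-negative, so norm_1 is just the plain sum and dividing by it restores the partition-of-unity property sum(weights) == 1. A standalone sketch of that clip-and-renormalise step using a Boost.uBLAS c_vector (the values are made up; CalculateInterpolationWeights and ChastePoint are Chaste types and are not used here):

#include <boost/numeric/ublas/vector.hpp>
#include <iostream>

int main() {
    namespace ublas = boost::numeric::ublas;
    ublas::c_vector<double, 3> w;           // e.g. barycentric weights of a point just outside a triangle
    w(0) = 0.7; w(1) = 0.45; w(2) = -0.15;  // sums to 1.0, but one weight is negative

    // Clamp negative weights to zero.
    for (std::size_t i = 0; i < w.size(); ++i)
        if (w(i) < 0.0) w(i) = 0.0;

    // All entries are now non-negative, so norm_1(w) equals the plain sum (1.15 here).
    w = w / ublas::norm_1(w);

    std::cout << w(0) + w(1) + w(2) << std::endl;   // prints 1
    return 0;
}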
Code example #5
File: test_vec.cpp Project: kgourgou/fmmtl
int main() {
    Vec<3,double> v0;
    std::cout << v0 << std::endl;

    Vec<3,double> v1 = Vec<3,double>(0.,1.,2);

    Vec<3,double> v2 = Vec<3,double>(3,4,5);

    Vec<3,double> v3 = v1 * (v2 + v2);

    std::cout << v3 << std::endl;
    std::cout << norm(v3) << std::endl;
    std::cout << norm_2(v3) << std::endl;

    Vec<4, double> v = Vec<4,double>(1, 2.1f, 3.14, 2u);

    std::cout << norm_1(v) << std::endl;
    std::cout << norm_2(v) << std::endl;
    std::cout << norm_inf(v) << std::endl;
    std::cout << v << std::endl;

    return 0;
}
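For the 4-vector v ≈ (1, 2.1, 3.14, 2), the three norms printed at the end work out to roughly norm_1(v) = 1 + 2.1 + 3.14 + 2 = 8.24, norm_2(v) = sqrt(1 + 4.41 + 9.8596 + 4) ≈ 4.39, and norm_inf(v) = 3.14; the printed values are approximate because 2.1f is a float literal.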
Code example #6
dgematrix expm(const dgematrix &A)
{
      int  m_vals[5] = {3,5,7,9,13};
      long double theta[5] = {1.495585217958292e-002,
                              2.539398330063230e-001,
                              9.504178996162932e-001,
                              2.097847961257068e+000,
                              5.371920351148152e+000};
      
      
      double normA = norm_1(A);
      int i;
      int s;
      int p;
      dgematrix F;
      if (normA <= theta[4])
            {
                  for (i=0;i<5;i++)
                        if(normA <= theta[i])
                              {
                                    F = PadeApproximantOfDegree(A,m_vals[i]);
                                    break;
                              }
            }
      else
            {
                  s = (int)(log2(normA/theta[4])) + 1;
                  p = (int)pow(2.,s);
                  F = PadeApproximantOfDegree(A/p,m_vals[4]);
                  for( i=0 ; i < s ; i++)
                        F*=F;
                  
                        
            }
      return F;
 }
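The else branch is the scaling-and-squaring step: when ||A||_1 exceeds the largest threshold theta[4], A is divided by p = 2^s so that the scaled matrix falls within range of the degree-13 Padé approximant, and the s repeated squarings F *= F then recover the result via the identity exp(A) = (exp(A / 2^s))^(2^s).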
Code example #7
File: blas.hpp Project: imos/icfpc2015
 typename type_traits<typename V::value_type>::real_type
 asum (const V &v) {
     return norm_1 (v);
 }
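asum mirrors the BLAS xASUM routine (sum of absolute values), which for real vectors is exactly the 1-norm, so the wrapper simply forwards to norm_1. In Boost.uBLAS, from which this header appears to be taken, the wrapper lives in namespace blas_1; a small illustrative check that both calls agree:

#include <boost/numeric/ublas/blas.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <iostream>

int main() {
    namespace ublas = boost::numeric::ublas;
    ublas::vector<double> v(3);
    v(0) = -1.0; v(1) = 2.0; v(2) = -3.0;
    // Both print 6: asum forwards to norm_1, the sum of absolute values.
    std::cout << ublas::blas_1::asum(v) << " " << ublas::norm_1(v) << std::endl;
    return 0;
}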
Code example #8
File: matrix.cpp Project: wzhang021484/CSC431Project
double condition_number(const matrix& A) 
{
	// Condition number in the 1-norm: kappa_1(A) = ||A||_1 * ||A^{-1}||_1
	return norm_1(A)*norm_1(inv(A));
}
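As a quick sanity check of the definition, for the diagonal matrix diag(1, 1000) the 1-norm is 1000 and the 1-norm of its inverse diag(1, 0.001) is 1, so the condition number is 1000; values near 1 indicate a well-conditioned matrix, while large values mean that solving linear systems with A can amplify relative errors by roughly that factor.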
Code example #9
File: matrix.cpp Project: wzhang021484/CSC431Project
bool operator==(const matrix& A, const matrix& B) 
{
    return (norm_1(A-B)<PRECISION);
}
Code example #10
File: sqp_internal.cpp Project: kozatt/casadi
void SQPInternal::evaluate(int nfdir, int nadir){
  casadi_assert(nfdir==0 && nadir==0);
  
  checkInitialBounds();
  
  // Get problem data
  const vector<double>& x_init = input(NLP_X_INIT).data();
  const vector<double>& lbx = input(NLP_LBX).data();
  const vector<double>& ubx = input(NLP_UBX).data();
  const vector<double>& lbg = input(NLP_LBG).data();
  const vector<double>& ubg = input(NLP_UBG).data();
  
  // Set the static parameter
  if (parametric_) {
    const vector<double>& p = input(NLP_P).data();
    if (!F_.isNull()) F_.setInput(p,F_.getNumInputs()-1);
    if (!G_.isNull()) G_.setInput(p,G_.getNumInputs()-1);
    if (!H_.isNull()) H_.setInput(p,H_.getNumInputs()-1);
    if (!J_.isNull()) J_.setInput(p,J_.getNumInputs()-1);
  }
    
  // Set linearization point to initial guess
  copy(x_init.begin(),x_init.end(),x_.begin());
  
  // Lagrange multipliers of the NLP
  fill(mu_.begin(),mu_.end(),0);
  fill(mu_x_.begin(),mu_x_.end(),0);

  // Initial constraint Jacobian
  eval_jac_g(x_,gk_,Jk_);
  
  // Initial objective gradient
  eval_grad_f(x_,fk_,gf_);
  
  // Initialize or reset the Hessian or Hessian approximation
  reg_ = 0;
  if( hess_mode_ == HESS_BFGS){
    reset_h();
  } else {
    eval_h(x_,mu_,1.0,Bk_);
  }

  // Evaluate the initial gradient of the Lagrangian
  copy(gf_.begin(),gf_.end(),gLag_.begin());
  if(m_>0) DMatrix::mul_no_alloc_tn(Jk_,mu_,gLag_);
  // gLag += mu_x_;
  transform(gLag_.begin(),gLag_.end(),mu_x_.begin(),gLag_.begin(),plus<double>());

  // Number of SQP iterations
  int iter = 0;

  // Number of line-search iterations
  int ls_iter = 0;
  
  // Last line-search successful
  bool ls_success = true;
  
  // Reset
  merit_mem_.clear();
  sigma_ = 0.;    // NOTE: Move this into the main optimization loop

  // Default stepsize
  double t = 0;
  
  // MAIN OPTIMIZATION LOOP
  while(true){
    
    // Primal infeasibility
    double pr_inf = primalInfeasibility(x_, lbx, ubx, gk_, lbg, ubg);
    
    // 1-norm of the Lagrangian gradient
    double gLag_norm1 = norm_1(gLag_);
    
    // 1-norm of step
    double dx_norm1 = norm_1(dx_);
    
    // Print header occasionally
    if(iter % 10 == 0) printIteration(cout);
    
    // Print information about the current iterate
    printIteration(cout,iter,fk_,pr_inf,gLag_norm1,dx_norm1,reg_,ls_iter,ls_success);
    
    // Call callback function if present
    if (!callback_.isNull()) {
      callback_.input(NLP_COST).set(fk_);
      callback_.input(NLP_X_OPT).set(x_);
      callback_.input(NLP_LAMBDA_G).set(mu_);
      callback_.input(NLP_LAMBDA_X).set(mu_x_);
      callback_.input(NLP_G).set(gk_);
      callback_.evaluate();
      
      if (callback_.output(0).at(0)) {
        cout << endl;
        cout << "CasADi::SQPMethod: aborted by callback..." << endl;
        break;
      }
    }
    
    // Checking convergence criteria
    if (pr_inf < tol_pr_ && gLag_norm1 < tol_du_){
      cout << endl;
      cout << "CasADi::SQPMethod: Convergence achieved after " << iter << " iterations." << endl;
      break;
    }
    
    if (iter >= maxiter_){
      cout << endl;
      cout << "CasADi::SQPMethod: Maximum number of iterations reached." << endl;
      break;
    }
    
    // Start a new iteration
    iter++;
    
    // Formulate the QP
    transform(lbx.begin(),lbx.end(),x_.begin(),qp_LBX_.begin(),minus<double>());
    transform(ubx.begin(),ubx.end(),x_.begin(),qp_UBX_.begin(),minus<double>());
    transform(lbg.begin(),lbg.end(),gk_.begin(),qp_LBA_.begin(),minus<double>());
    transform(ubg.begin(),ubg.end(),gk_.begin(),qp_UBA_.begin(),minus<double>());

    // Solve the QP
    solve_QP(Bk_,gf_,qp_LBX_,qp_UBX_,Jk_,qp_LBA_,qp_UBA_,dx_,qp_DUAL_X_,qp_DUAL_A_);
    log("QP solved");

    // Detecting indefiniteness
    double gain = quad_form(dx_,Bk_);
    if (gain < 0){
      casadi_warning("Indefinite Hessian detected...");
    }
        
    // Calculate penalty parameter of merit function
    sigma_ = std::max(sigma_,1.01*norm_inf(qp_DUAL_X_));
    sigma_ = std::max(sigma_,1.01*norm_inf(qp_DUAL_A_));

    // Calculate the L1 merit function at the current iterate
    double l1_infeas = primalInfeasibility(x_, lbx, ubx, gk_, lbg, ubg);

    // Right-hand side of Armijo condition
    double F_sens = inner_prod(dx_, gf_);    
    double L1dir = F_sens - sigma_ * l1_infeas;
    double L1merit = fk_ + sigma_ * l1_infeas;

    // Store the current merit function value in a list
    merit_mem_.push_back(L1merit);
    if (merit_mem_.size() > merit_memsize_){
      merit_mem_.pop_front();
    }
    // Stepsize
    t = 1.0;
    double fk_cand;
    // Merit function value in candidate
    double L1merit_cand = 0;

    // Reset line-search counter, success marker
    ls_iter = 0;
    ls_success = true;

    // Line-search
    log("Starting line-search");
    if(maxiter_ls_>0){ // maxiter_ls_== 0 disables line-search
      
      // Line-search loop
      while (true){
        for(int i=0; i<n_; ++i) x_cand_[i] = x_[i] + t * dx_[i];
      
        // Evaluating objective and constraints
        eval_f(x_cand_,fk_cand);
        eval_g(x_cand_,gk_cand_);
        ls_iter++;

        // Calculate the merit function at the candidate
        l1_infeas = primalInfeasibility(x_cand_, lbx, ubx, gk_cand_, lbg, ubg);
      
        L1merit_cand = fk_cand + sigma_ * l1_infeas;
        // Calculating maximal merit function value so far
        double meritmax = *max_element(merit_mem_.begin(), merit_mem_.end());
        if (L1merit_cand <= meritmax + t * c1_ * L1dir){
          // Accepting candidate
	  log("Line-search completed, candidate accepted");
          break;
        }
      
        // Line-search not successful, but we accept it.
        if(ls_iter == maxiter_ls_){
          ls_success = false;
	  log("Line-search completed, maximum number of iterations");
          break;
        }
      
        // Backtracking
        t = beta_ * t;
      }
    }

    // Candidate accepted, update dual variables
    for(int i=0; i<m_; ++i) mu_[i] = t * qp_DUAL_A_[i] + (1 - t) * mu_[i];
    for(int i=0; i<n_; ++i) mu_x_[i] = t * qp_DUAL_X_[i] + (1 - t) * mu_x_[i];
    
    if( hess_mode_ == HESS_BFGS){
      // Evaluate the gradient of the Lagrangian with the old x but new mu (for BFGS)
      copy(gf_.begin(),gf_.end(),gLag_old_.begin());
      if(m_>0) DMatrix::mul_no_alloc_tn(Jk_,mu_,gLag_old_);
      // gLag_old += mu_x_;
      transform(gLag_old_.begin(),gLag_old_.end(),mu_x_.begin(),gLag_old_.begin(),plus<double>());
    }
    
    // Candidate accepted, update the primal variable
    copy(x_.begin(),x_.end(),x_old_.begin());
    copy(x_cand_.begin(),x_cand_.end(),x_.begin());

    // Evaluate the constraint Jacobian
    log("Evaluating jac_g");
    eval_jac_g(x_,gk_,Jk_);
    
    // Evaluate the gradient of the objective function
    log("Evaluating grad_f");
    eval_grad_f(x_,fk_,gf_);
    
    // Evaluate the gradient of the Lagrangian with the new x and new mu
    copy(gf_.begin(),gf_.end(),gLag_.begin());
    if(m_>0) DMatrix::mul_no_alloc_tn(Jk_,mu_,gLag_);
    // gLag += mu_x_;
    transform(gLag_.begin(),gLag_.end(),mu_x_.begin(),gLag_.begin(),plus<double>());

    // Updating Lagrange Hessian
    if( hess_mode_ == HESS_BFGS){
      log("Updating Hessian (BFGS)");
      // BFGS with careful updates and restarts
      if (iter % lbfgs_memory_ == 0){
        // Reset Hessian approximation by dropping all off-diagonal entries
        const vector<int>& rowind = Bk_.rowind();      // Access sparsity (row offset)
        const vector<int>& col = Bk_.col();            // Access sparsity (column)
        vector<double>& data = Bk_.data();             // Access nonzero elements
        for(int i=0; i<rowind.size()-1; ++i){          // Loop over the rows of the Hessian
          for(int el=rowind[i]; el<rowind[i+1]; ++el){ // Loop over the nonzero elements of the row
            if(i!=col[el]) data[el] = 0;               // Zero out off-diagonal entries
          }
        }
      }
      
      // Pass to BFGS update function
      bfgs_.setInput(Bk_,BFGS_BK);
      bfgs_.setInput(x_,BFGS_X);
      bfgs_.setInput(x_old_,BFGS_X_OLD);
      bfgs_.setInput(gLag_,BFGS_GLAG);
      bfgs_.setInput(gLag_old_,BFGS_GLAG_OLD);
      
      // Update the Hessian approximation
      bfgs_.evaluate();
      
      // Get the updated Hessian
      bfgs_.getOutput(Bk_);
    } else {
      // Exact Hessian
      log("Evaluating hessian");
      eval_h(x_,mu_,1.0,Bk_);
    }
  }
  
  // Save results to outputs
  output(NLP_COST).set(fk_);
  output(NLP_X_OPT).set(x_);
  output(NLP_LAMBDA_G).set(mu_);
  output(NLP_LAMBDA_X).set(mu_x_);
  output(NLP_G).set(gk_);
  
  // Save statistics
  stats_["iter_count"] = iter;
}
Code example #11
    void TestDistancesToCorner() throw (Exception)
    {
        TrianglesMeshReader<3,3> mesh_reader("mesh/test/data/cube_21_nodes_side/Cube21"); // 5x5x5mm cube (internode distance = 0.25mm)

        TetrahedralMesh<3,3> mesh;
        mesh.ConstructFromMeshReader(mesh_reader);

        unsigned num_nodes=9261u;
        TS_ASSERT_EQUALS(mesh.GetNumNodes(), num_nodes); // 21x21x21 nodes
        TS_ASSERT_EQUALS(mesh.GetNumElements(), 48000u);
        TS_ASSERT_EQUALS(mesh.GetNumBoundaryElements(), 4800u);

        DistributedTetrahedralMesh<3,3> parallel_mesh(DistributedTetrahedralMeshPartitionType::DUMB); // No reordering;
        parallel_mesh.ConstructFromMeshReader(mesh_reader);
        TS_ASSERT_EQUALS(parallel_mesh.GetNumNodes(), num_nodes); // 21x21x21 nodes
        TS_ASSERT_EQUALS(parallel_mesh.GetNumElements(), 48000u);
        TS_ASSERT_EQUALS(parallel_mesh.GetNumBoundaryElements(), 4800u);

        unsigned far_index=9260u;
        c_vector<double,3> far_corner=mesh.GetNode(far_index)->rGetLocation();
        TS_ASSERT_DELTA( far_corner[0], 0.25, 1e-11);
        TS_ASSERT_DELTA( far_corner[1], 0.25, 1e-11);
        TS_ASSERT_DELTA( far_corner[2], 0.25, 1e-11);
        try
        {
            c_vector<double,3> parallel_far_corner=parallel_mesh.GetNode(far_index)->rGetLocation();
            TS_ASSERT_DELTA( parallel_far_corner[0], 0.25, 1e-11);
            TS_ASSERT_DELTA( parallel_far_corner[1], 0.25, 1e-11);
            TS_ASSERT_DELTA( parallel_far_corner[2], 0.25, 1e-11);
        }
        catch (Exception&)
        {
        }

        std::vector<unsigned> map_far_corner;
        map_far_corner.push_back(far_index);

        DistanceMapCalculator<3,3> distance_calculator(mesh);
        std::vector<double> distances;
        distance_calculator.ComputeDistanceMap(map_far_corner, distances);

        DistanceMapCalculator<3,3> parallel_distance_calculator(parallel_mesh);
        std::vector<double> parallel_distances;
        parallel_distance_calculator.ComputeDistanceMap(map_far_corner, parallel_distances);

        TS_ASSERT_EQUALS(distance_calculator.mRoundCounter, 1u);
        //Nodes in the mesh are ordered such that a dumb partitioning will give a sequential handover from proc0 to proc1...
        TS_ASSERT_EQUALS(parallel_distance_calculator.mRoundCounter, PetscTools::GetNumProcs());
        //Note unsigned division is okay here
        TS_ASSERT_DELTA(parallel_distance_calculator.mPopCounter, num_nodes/PetscTools::GetNumProcs(), 1u);
        TS_ASSERT_DELTA(distance_calculator.mPopCounter, num_nodes, 1u);
        for (unsigned index=0; index<distances.size(); index++)
        {
            c_vector<double, 3> node = mesh.GetNode(index)->rGetLocation();

            //Straightline distance
            double euclidean_distance = norm_2(far_corner - node);
            // x + y + z distance
            double manhattan_distance = norm_1(far_corner - node);
            //If they differ, then allow the in-mesh distance to be in between
            double error_bound = (manhattan_distance - euclidean_distance)/2.0;
            //If they don't differ, then we expect the in-mesh distance to be similar
            if (error_bound < 1e-15)
            {
                error_bound = 1e-15;
            }
            TS_ASSERT_LESS_THAN_EQUALS(distances[index], manhattan_distance+DBL_EPSILON);
            TS_ASSERT_LESS_THAN_EQUALS(euclidean_distance, distances[index]+DBL_EPSILON);
            TS_ASSERT_DELTA(distances[index], euclidean_distance, error_bound);

            TS_ASSERT_DELTA(distances[index], parallel_distances[index], 1e-15);
        }

        // Test some point-to-point distances
        RandomNumberGenerator::Instance()->Reseed(1);
        unsigned trials=25;
        unsigned pops=0;
        unsigned sequential_pops=0;
        for (unsigned i=0; i<trials; i++)
        {
            unsigned index=RandomNumberGenerator::Instance()->randMod(parallel_distances.size());
            TS_ASSERT_DELTA(parallel_distance_calculator.SingleDistance(9260u, index), parallel_distances[index], 1e-15);
            TS_ASSERT_DELTA(distance_calculator.SingleDistance(9260u, index), parallel_distances[index], 1e-15);
            pops += parallel_distance_calculator.mPopCounter;
            sequential_pops += distance_calculator.mPopCounter;
            TS_ASSERT_LESS_THAN_EQUALS(parallel_distance_calculator.mRoundCounter, PetscTools::GetNumProcs()+2);
        }

        // Without A*: TS_ASSERT_DELTA(sequential_pops/(double)trials, num_nodes/2, 300);
        TS_ASSERT_LESS_THAN(sequential_pops/(double)trials, num_nodes/20.0);
        if (PetscTools::IsSequential())
        {
            //Early termination
            TS_ASSERT_EQUALS(pops, sequential_pops);
        }
        else
        {
            //Early termination on remote processes is not yet possible
            //This may lead to multiple updates from remote
            //A* Leads to even more updates on average
            // Without A*: TS_ASSERT_DELTA(pops/(double)trials, num_nodes/PetscTools::GetNumProcs(), 700.0);
            TS_ASSERT_LESS_THAN(pops/(double)trials, num_nodes/10.0 );
         }

        //Reverse - to check that cached information is flushed.
        for (unsigned i=0; i<3; i++)
        {
            unsigned index=RandomNumberGenerator::Instance()->randMod(parallel_distances.size());
            TS_ASSERT_DELTA(parallel_distance_calculator.SingleDistance(index, 9260u), parallel_distances[index], 1e-15);
        }
    }
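The error bound used in the loop above comes from the gap between the two norms: for a node offset from far_corner by (d, d, d), the straight-line (Euclidean) distance is d*sqrt(3) ≈ 1.732*d while the x + y + z (Manhattan) distance is 3*d, so the allowed slack (manhattan - euclidean)/2 is about 0.634*d; for nodes lying on an axis through the corner the two norms coincide and the bound collapses to the 1e-15 floor.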
Code example #12
File: expm.hpp Project: fengwang/larbed-refinement
    const matrix<T, D, A_>
    expm( const matrix<T, D, A_>& A )
    {
        typedef matrix<T, D, A_>                                                        matrix_type;
        typedef typename matrix_type::value_type                                        value_type_;
        typedef typename expm_private::fix_complex_value_type<value_type_>::value_type  value_type;
        typedef typename matrix_type::size_type                                         size_type;
        assert( A.row() == A.col() );
        static const value_type theta[] = { 0.000000000000000e+000, //0
                                            3.650024139523051e-008, //1
                                            5.317232856892575e-004, //2
                                            1.495585217958292e-002, //3
                                            8.536352760102745e-002, //4
                                            2.539398330063230e-001, //5
                                            5.414660951208968e-001, //6
                                            9.504178996162932e-001, //7
                                            1.473163964234804e+000, //8
                                            2.097847961257068e+000, //9
                                            2.811644121620263e+000, //10
                                            3.602330066265032e+000, //11
                                            4.458935413036850e+000, //12
                                            5.371920351148152e+000  //13
                                          };
        value_type const norm_A               = norm_1( A );
        //value_type const ratio                = theta[13] / norm_A;
        value_type const ratio                = norm_A / theta[13];
        size_type const s               = ratio < value_type(1) ? 0 : static_cast<size_type>( std::ceil( std::log2( ratio ) ) );
        value_type const s__2           = s ? value_type(1 << s) : value_type(1);

        /*       
        std::cerr << "Matrix expm starting with S__2 is " << s__2 << "\n";
        if ( std::abs( s__2 - 1.0 ) < 0.1  )
            std::cerr << "\tencountering matrix \n" << A << "\n";
        */
        

        matrix_type const& _A                   = A / s__2;
        size_type const n                    = _A.row();
        static value_type const c []    = { 0.000000000000000,  // 0
                                            //64764752532480000,  // 1
                                            6.4764752532480000e+16,  // 1
                                            //32382376266240000,  // 2
                                            3.2382376266240000e+16,  // 2
                                            //7771770303897600,   // 3
                                            7.771770303897600e+15,   // 3
                                            //1187353796428800,   // 4
                                            1.187353796428800e+15,   // 4
                                            //129060195264000,    // 5
                                            1.29060195264000e+14,    // 5
                                            //10559470521600,     // 6
                                            1.0559470521600e+13,     // 6
                                            //670442572800,       // 7
                                            6.70442572800e+11,       // 7
                                            //33522128640,        // 8
                                            3.3522128640e+10,        // 8
                                            //1323241920,         // 9
                                            1.323241920e+9,         // 9
                                            //40840800,           // 10
                                            4.0840800e+7,           // 10
                                            //960960,             // 11
                                            9.60960e+5,             // 11
                                            //16380,              // 12
                                            1.6380e+4,              // 12
                                            //182,                // 13
                                            1.82e+2,                // 13
                                            1                   // 14
                                        };
        matrix_type const& _A2 = _A * _A;
        matrix_type const& _A4 = _A2 * _A2;
        matrix_type const& _A6 = _A2 * _A4;
        matrix_type const& U = _A * ( _A6 * ( c[14] * _A6 + c[12] * _A4 + c[10] * _A2 ) + c[8] * _A6 + c[6] * _A4 + c[4] * _A2 + c[2] * eye<value_type>( n, n ) );
        matrix_type const& V = _A6 * ( c[13] * _A6 + c[11] * _A4 + c[9] * _A2 ) + c[7] * _A6 + c[5] * _A4 + c[3] * _A2 + c[1] * eye<value_type>( n, n );
        matrix_type const& VU = V + U;
        matrix_type const& UV = V - U;
        matrix_type F = VU / UV;
        //matrix_type      F   = ( V + U ) / ( V - U );

        ////!!!!!!!!!!!!!!!!
        /*
        matrix_type const& _A                   = A / s__2;
        matrix_type _a = _A;
        size_type const n                       = _A.row();
        auto F = eye<value_type_>( n, n );

        const unsigned long loops = 10;
        value_type factor = value_type(1);

        for ( unsigned long i = 0; i != loops; ++i )
        {
            factor /= i;
            F = F + _a * factor;
            if ( i + 1 == loops ) break;
            _a *= _A;
        }
        */
        ////!!!!!!!!!!!!!!!!
        for ( size_type i = 0; i != s; ++i )
            F *= F;

    
        //std::cerr << "EXPM ends.\n";

        return F;
    }
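In this routine U collects the odd powers of the scaled matrix _A and V the even powers, so the [13/13] Padé approximant to exp(_A) is the ratio (V + U) / (V - U), formed here as VU / UV; because U and V are polynomials in the same matrix they commute, so left and right division by (V - U) give the same result. The closing loop then squares F s times to undo the initial scaling by s__2 = 2^s.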
Code example #13
File: newton.cpp Project: BrechtBa/casadi
  void Newton::solveNonLinear() {
    casadi_msg("Newton::solveNonLinear:begin");

    // Set up timers for profiling
    double time_zero=0;
    double time_start=0;
    double time_stop=0;
    if (CasadiOptions::profiling && !CasadiOptions::profilingBinary) {
      time_zero = getRealTime();
      CasadiOptions::profilingLog  << "start " << this << ":" <<getOption("name") << std::endl;
    }

    // Pass the inputs to J
    for (int i=0; i<nIn(); ++i) {
      if (i!=iin_) jac_.setInput(input(i), i);
    }

    // Aliases
    DMatrix &u = output(iout_);
    DMatrix &J = jac_.output(0);
    DMatrix &F = jac_.output(1+iout_);

    // Perform the Newton iterations
    int iter=0;

    bool success = true;

    while (true) {
      // Break if maximum number of iterations already reached
      if (iter >= max_iter_) {
        log("evaluate", "Max. iterations reached.");
        stats_["return_status"] = "max_iteration_reached";
        success = false;
        break;
      }

      // Start a new iteration
      iter++;

      // Print progress
      if (monitored("step") || monitored("stepsize")) {
        userOut() << "Step " << iter << "." << std::endl;
      }

      if (monitored("step")) {
        userOut() << "  u = " << u << std::endl;
      }

      // Use u to evaluate J
      jac_.setInput(u, iin_);
      for (int i=0; i<nIn(); ++i)
        if (i!=iin_) jac_.setInput(input(i), i);

      if (CasadiOptions::profiling) {
        time_start = getRealTime(); // Start timer
      }

      jac_.evaluate();

      // Write out profiling information
      if (CasadiOptions::profiling && !CasadiOptions::profilingBinary) {
        time_stop = getRealTime(); // Stop timer
        CasadiOptions::profilingLog
            << (time_stop-time_start)*1e6 << " us | "
            << (time_stop-time_zero)*1e3 << " ms | "
            << this << ":" << getOption("name") << ":0|" << jac_.get() << ":"
            << jac_.getOption("name") << "|evaluate jacobian" << std::endl;
      }

      if (monitored("F")) userOut() << "  F = " << F << std::endl;
      if (monitored("normF"))
        userOut() << "  F (min, max, 1-norm, 2-norm) = "
                  << (*std::min_element(F.data().begin(), F.data().end()))
                  << ", " << (*std::max_element(F.data().begin(), F.data().end()))
                  << ", " << norm_1(F) << ", " << norm_F(F) << std::endl;
      if (monitored("J")) userOut() << "  J = " << J << std::endl;

      double abstol = 0;
      if (numeric_limits<double>::infinity() != abstol_) {
        abstol = std::max((*std::max_element(F.data().begin(),
                                                  F.data().end())),
                               -(*std::min_element(F.data().begin(),
                                                   F.data().end())));
        if (abstol <= abstol_) {
          casadi_msg("Converged to acceptable tolerance - abstol: " << abstol_);
          break;
        }
      }

      // Prepare the linear solver with J
      linsol_.setInput(J, LINSOL_A);

      if (CasadiOptions::profiling) {
        time_start = getRealTime(); // Start timer
      }
      linsol_.prepare();
      // Write out profiling information
      if (CasadiOptions::profiling && !CasadiOptions::profilingBinary) {
        time_stop = getRealTime(); // Stop timer
        CasadiOptions::profilingLog
            << (time_stop-time_start)*1e6 << " us | "
            << (time_stop-time_zero)*1e3 << " ms | "
            << this << ":" << getOption("name")
            << ":1||prepare linear system" << std::endl;
      }

      if (CasadiOptions::profiling) {
        time_start = getRealTime(); // Start timer
      }
      // Solve against F
      linsol_.solve(&F.front(), 1, false);
      if (CasadiOptions::profiling && !CasadiOptions::profilingBinary) {
        time_stop = getRealTime(); // Stop timer
        CasadiOptions::profilingLog
            << (time_stop-time_start)*1e6 << " us | "
            << (time_stop-time_zero)*1e3 << " ms | "
            << this << ":" << getOption("name") << ":2||solve linear system" << std::endl;
      }

      if (monitored("step")) {
        userOut() << "  step = " << F << std::endl;
      }

      double abstolStep=0;
      if (numeric_limits<double>::infinity() != abstolStep_) {
        abstolStep = std::max((*std::max_element(F.data().begin(),
                                                  F.data().end())),
                               -(*std::min_element(F.data().begin(),
                                                   F.data().end())));
        if (monitored("stepsize")) {
          userOut() << "  stepsize = " << abstolStep << std::endl;
        }
        if (abstolStep <= abstolStep_) {
          casadi_msg("Converged to acceptable tolerance - abstolStep: " << abstolStep_);
          break;
        }
      }

      if (print_iteration_) {
        // Only print iteration header once in a while
        if (iter % 10==0) {
          printIteration(userOut());
        }

        // Print iteration information
        printIteration(userOut(), iter, abstol, abstolStep);
      }

      // Update Xk+1 = Xk - J^(-1) F
      std::transform(u.begin(), u.end(), F.begin(), u.begin(), std::minus<double>());

    }

    // Get auxiliary outputs
    for (int i=0; i<nOut(); ++i) {
      if (i!=iout_) jac_.getOutput(output(i), 1+i);
    }

    // Store the iteration count
    if (gather_stats_) stats_["iter"] = iter;

    if (success) stats_["return_status"] = "success";

    // Factorization up-to-date
    fact_up_to_date_ = true;

    casadi_msg("Newton::solveNonLinear():end after " << iter << " steps");
  }