Example #1
            Eigen::VectorXd operator()(const F& f, const Eigen::VectorXd& init, bool bounded) const
            {
                // Assert that the algorithm is non-gradient
                // TO-DO: Add support for MLSL (Multi-Level Single-Linkage)
                // TO-DO: Add better support for ISRES (Improved Stochastic Ranking Evolution Strategy)
                // clang-format off
                static_assert(Algorithm == nlopt::LN_COBYLA || Algorithm == nlopt::LN_BOBYQA ||
                    Algorithm == nlopt::LN_NEWUOA || Algorithm == nlopt::LN_NEWUOA_BOUND ||
                    Algorithm == nlopt::LN_PRAXIS || Algorithm == nlopt::LN_NELDERMEAD ||
                    Algorithm == nlopt::LN_SBPLX || Algorithm == nlopt::GN_DIRECT ||
                    Algorithm == nlopt::GN_DIRECT_L || Algorithm == nlopt::GN_DIRECT_L_RAND ||
                    Algorithm == nlopt::GN_DIRECT_NOSCAL || Algorithm == nlopt::GN_DIRECT_L_NOSCAL ||
                    Algorithm == nlopt::GN_DIRECT_L_RAND_NOSCAL || Algorithm == nlopt::GN_ORIG_DIRECT ||
                    Algorithm == nlopt::GN_ORIG_DIRECT_L || Algorithm == nlopt::GN_CRS2_LM ||
                    Algorithm == nlopt::GD_STOGO || Algorithm == nlopt::GD_STOGO_RAND ||
                    Algorithm == nlopt::GN_ISRES || Algorithm == nlopt::GN_ESCH, "NLOptNoGrad accepts gradient free nlopt algorithms only");
                // clang-format on

                int dim = init.size();
                nlopt::opt opt(Algorithm, dim);

                opt.set_max_objective(nlopt_func<F>, (void*)&f);

                std::vector<double> x(dim);
                Eigen::VectorXd::Map(&x[0], dim) = init;

                opt.set_maxeval(Params::opt_nloptnograd::iterations());

                if (bounded) {
                    opt.set_lower_bounds(std::vector<double>(dim, 0));
                    opt.set_upper_bounds(std::vector<double>(dim, 1));
                }

                double max;

                opt.optimize(x, max);

                return Eigen::VectorXd::Map(x.data(), x.size());
            }
Example #2
      bool learn_variance(Eigen::VectorXd& var, const Eigen::VectorXd& q) {
        if (adaptation_window())
          estimator_.add_sample(q);

        if (end_adaptation_window()) {
          compute_next_window();

          estimator_.sample_variance(var);

          double n = static_cast<double>(estimator_.num_samples());
          var = (n / (n + 5.0)) * var
                + 1e-3 * (5.0 / (n + 5.0)) * Eigen::VectorXd::Ones(var.size());

          estimator_.restart();

          ++adapt_window_counter_;
          return true;
        }

        ++adapt_window_counter_;
        return false;
      }
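When the adaptation window ends, the code above shrinks the sample variance toward a small constant before reusing it; written out (with n the number of samples in the window):

\hat{\sigma}^2_{\text{reg}} = \frac{n}{n+5}\,\hat{\sigma}^2_{\text{sample}} + 10^{-3}\cdot\frac{5}{n+5}\,\mathbf{1}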
Example #3
void ObjectModelLine::optimizeModelCoefficients (const std::vector<int> &inliers,
		const Eigen::VectorXd &model_coefficients,
		Eigen::VectorXd &optimized_coefficients){
	assert (model_coefficients.size () == 6);

	if (inliers.size () == 0)
	{
		cout<<"[ObjectModelLine::optimizeModelCoefficients] Inliers vector empty! Returning the same coefficients";
		optimized_coefficients = model_coefficients;
		return;
	}

	assert (inliers.size () > 2);

	optimized_coefficients.resize (6);

	// Compute the 3x3 covariance matrix
	Eigen::Vector4d centroid = Centroid3D::computeCentroid(this->inputPointCloud, inliers);
//	compute3DCentroid (this->inputPointCloud, inliers, centroid);
	Eigen::Matrix3d covariance_matrix;
//	computeCovarianceMatrix (inputPointCloud, inliers, centroid, covariance_matrix);
	 covariance_matrix = Covariance3D::computeCovarianceMatrix (inputPointCloud, inliers, centroid);
	optimized_coefficients[0] = centroid[0];
	optimized_coefficients[1] = centroid[1];
	optimized_coefficients[2] = centroid[2];

	// Extract the eigenvalues and eigenvectors
	//cloud_geometry::eigen_cov (covariance_matrix, eigen_values, eigen_vectors);
	Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> ei_symm (covariance_matrix);
//	EIGEN_ALIGN_MEMORY Eigen::Vector3d eigen_values  = ei_symm.eigenvalues ();
	EIGEN_ALIGN_MEMORY Eigen::Matrix3d eigen_vectors = ei_symm.eigenvectors ();

#ifdef EIGEN3
	optimized_coefficients.tail<3> () = eigen_vectors.col (2).normalized ();
#else
	optimized_coefficients.end<3> () = eigen_vectors.col (2).normalized ();
#endif

}
Example #4
bool JointTrajGeneratorRML::insertSegments(
        const Eigen::VectorXd &point,
        const ros::Time &time,
        TrajSegments &segments,
        std::vector<size_t> &index_permutation) const
{
  // Check the size of the jointspace command
  if(point.size() == n_dof_) {
    // Handle a position given as an Eigen vector
    TrajSegment segment(n_dof_,true);

    segment.start_time = time;
    segment.goal_positions = point;
    segments.clear();
    segments.push_back(segment);
  } else {
    RTT::log(RTT::Debug) << "Received trajectory of invalid size." <<RTT::endlog();
    return false;
  }

  return true;
}
Example #5
UnifiedModel::UnifiedModel(const Eigen::MatrixXd& centers, const Eigen::MatrixXd& widths, const Eigen::MatrixXd& slopes, const Eigen::VectorXd& offsets, bool normalized_basis_functions, bool lines_pivot_at_max_activation)
{
  int n_basis_functions = centers.rows();
#ifndef NDEBUG // Variables below are only required for asserts; check for NDEBUG to avoid warnings.
  int n_dims = centers.cols();
#endif 

  assert(n_basis_functions==widths.rows());
  assert(n_dims           ==widths.cols());
  assert(n_basis_functions==slopes.rows());
  assert(n_dims           ==slopes.cols());
  assert(n_basis_functions==offsets.size());
  
  centers_.resize(n_basis_functions);
  covars_.resize(n_basis_functions);
  slopes_.resize(n_basis_functions);
  offsets_.resize(n_basis_functions);
  priors_.resize(n_basis_functions);
  
  for (int i=0; i<n_basis_functions; i++)
  {
    centers_[i] = centers.row(i);
    covars_[i] = widths.row(i).asDiagonal();
    covars_[i] = covars_[i].array().square();
    slopes_[i] = slopes.row(i);
    offsets_[i] = offsets[i];
    
    priors_[i] = 1.0;
  }
  
  normalized_basis_functions_ = normalized_basis_functions;
  lines_pivot_at_max_activation_ = lines_pivot_at_max_activation;
  slopes_as_angles_ = false;
  
  cosine_basis_functions_ = false;

  initializeAllValuesVectorSize();  
}
Example #6
void TrajActionServer::command_joints(Eigen::VectorXd q_cmd) {
    int njnts = q_cmd.size();
    std_msgs::Float64 qval_msg;
    qval_msg.data = q_cmd(0);
    j1_pub_.publish(qval_msg);
    if (njnts>1) {
        qval_msg.data=q_cmd(1);
        j2_pub_.publish(qval_msg);
        // mimic this for pitch mechanism joints:
        j2_1_pub_.publish(qval_msg);
        j2_2_pub_.publish(qval_msg);
        j2_5_pub_.publish(qval_msg);
        // the following 2 joints follow w/ negative of j2
        qval_msg.data= -q_cmd(1);
        j2_3_pub_.publish(qval_msg);
        j2_4_pub_.publish(qval_msg);        
    }
    if (njnts>2) {
        qval_msg.data=q_cmd(2);
        j3_pub_.publish(qval_msg);
    }
    if (njnts>3) {
        qval_msg.data=q_cmd(3);
        j4_pub_.publish(qval_msg);
    }
    if (njnts>4) {
        qval_msg.data=q_cmd(4);
        j5_pub_.publish(qval_msg);
    }
    if (njnts>5) {
        qval_msg.data=q_cmd(5);
        j6_pub_.publish(qval_msg);
    }
    if (njnts>6) {
        qval_msg.data=q_cmd(6);
        j7_pub_.publish(qval_msg);
    }
}
Example #7
void VectorSum::updateHook()
{
  // Reset the accumulator
  sum_.setZero();

  // Get the addends
  bool has_new_data = false;
  Eigen::VectorXd addend;
  while(addends_in_.read( addend, false ) == RTT::NewData) {
    if(addend.size() == dim_) {
      sum_ += addend;
      has_new_data = true;
    } else {
      RTT::log(RTT::Error) << "Input to VectorSum component does not have the correct dimension. All inputs should have dimension "<<dim_<<" but this input had dimension " << addend.size() << RTT::endlog();
      this->error();
    }
  }

  // Write the sum
  if(has_new_data) {
    sum_out_.write( sum_ );
  }
}
Example #8
Eigen::VectorXd BlockSparseMatrix::operator* (const Eigen::VectorXd& rhs) const
{
	CHECK(!mbCSRDirty);
	int vectorLength = static_cast<int>(rhs.size());
	double* rhsData = new double[vectorLength];
	double* result = new double[vectorLength];
	memcpy(rhsData, rhs.data(), vectorLength * sizeof(double));
	char trans = 'n';
	int numRows = mBlockHeight * mNumBlocksHeight;

#ifdef _MKL_IMPLEMENT
	mkl_dcsrgemv(&trans, &numRows, const_cast<double*>(mCSREquivalent.GetValueData()), const_cast<int*>(mCSREquivalent.GetRowId()), const_cast<int*>(mCSREquivalent.GetColumnId()), rhsData, result);
#else 
	CHECK(0) << "_MKL_IMPLEMENT not defined!";
#endif
	
	Eigen::VectorXd ret(vectorLength);
	for (int i = 0; i < vectorLength; ++i)
		ret[i] = result[i];
	delete[] rhsData;
	delete[] result;
	return ret;
}
Example #9
template <typename PointT> void
pcl::SampleConsensusModelCircle3D<PointT>::optimizeModelCoefficients (
      const std::vector<int> &inliers, 
      const Eigen::VectorXf &model_coefficients, 
      Eigen::VectorXf &optimized_coefficients)
{
  optimized_coefficients = model_coefficients;

  // Needs a set of valid model coefficients
  if (model_coefficients.size () != 7)
  {
    PCL_ERROR ("[pcl::SampleConsensusModelCircle3D::optimizeModelCoefficients] Invalid number of model coefficients given (%lu)!\n", model_coefficients.size ());
    return;
  }

  // Need at least 3 samples
  if (inliers.size () <= 3)
  {
    PCL_ERROR ("[pcl::SampleConsensusModelCircle3D::optimizeModelCoefficients] Not enough inliers found to support a model (%lu)! Returning the same coefficients.\n", inliers.size ());
    return;
  }

  tmp_inliers_ = &inliers;

  OptimizationFunctor functor (static_cast<int> (inliers.size ()), this);
  Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
  Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm (num_diff);
  // Seed the Levenberg-Marquardt solver with the current coefficients so the
  // parameter vector has the right size and a sensible starting point.
  Eigen::VectorXd coeff = model_coefficients.cast<double> ();
  int info = lm.minimize (coeff);
  for (int i = 0; i < coeff.size (); ++i)
    optimized_coefficients[i] = static_cast<float> (coeff[i]);

  // Compute the L2 norm of the residuals
  PCL_DEBUG ("[pcl::SampleConsensusModelCircle3D::optimizeModelCoefficients] LM solver finished with exit code %i, having a residual norm of %g. \nInitial solution: %g %g %g %g %g %g %g \nFinal solution: %g %g %g %g %g %g %g\n",
             info, lm.fvec.norm (), model_coefficients[0], model_coefficients[1], model_coefficients[2], model_coefficients[3], model_coefficients[4], model_coefficients[5], model_coefficients[6], optimized_coefficients[0], optimized_coefficients[1], optimized_coefficients[2], optimized_coefficients[3], optimized_coefficients[4], optimized_coefficients[5], optimized_coefficients[6]);
}
Example #10
            Eigen::VectorXd operator()(const F& f, const Eigen::VectorXd& init, bool bounded) const
            {
                tools::par::init();
                typedef std::pair<Eigen::VectorXd, double> pair_t;
                auto body = [&](int i) {
                    // clang-format off
                    // each repeat starts the inner optimizer from a random initial point
                    Eigen::VectorXd r_init = tools::random_vector(init.size());
                    Eigen::VectorXd v = Optimizer()(f, r_init, bounded);
                    double lik = opt::eval(f, v);
                    return std::make_pair(v, lik);
                    // clang-format on
                };

                auto comp = [](const pair_t& v1, const pair_t& v2) {
                    // clang-format off
                    return v1.second > v2.second;
                    // clang-format on
                };

                pair_t init_v = std::make_pair(init, -std::numeric_limits<float>::max());
                auto m = tools::par::max(init_v, Params::opt_parallelrepeater::repeats(), body, comp);

                return m.first;
            };
Example #11
 double log_prob_grad(const M& model,
                      Eigen::VectorXd& params_r,
                      Eigen::VectorXd& gradient,
                      std::ostream* msgs = 0) {
   using std::vector;
   using stan::agrad::var;
   Eigen::Matrix<var,Eigen::Dynamic,1> ad_params_r(params_r.size());
   for (size_t i = 0; i < model.num_params_r(); ++i) {
     stan::agrad::var var_i(params_r[i]);
     ad_params_r[i] = var_i;
   }
   try {
     var adLogProb
       = model
         .template log_prob<propto,
                            jacobian_adjust_transform>(ad_params_r, msgs);
     double val = adLogProb.val();
     stan::agrad::grad(adLogProb, ad_params_r, gradient);
     return val;
   } catch (std::exception &ex) {
     stan::agrad::recover_memory();
     throw;
   }
 }
Example #12
IGL_INLINE Eigen::MatrixXd igl::rotate_vectors(
                    const Eigen::MatrixXd& V,
                    const Eigen::VectorXd& A,
                    const Eigen::MatrixXd& B1,
                    const Eigen::MatrixXd& B2)
{
  Eigen::MatrixXd RV(V.rows(),V.cols());

  for (unsigned i=0; i<V.rows();++i)
  {
    double norm = V.row(i).norm();
    
    // project onto the tangent plane and convert to angle
    double a = atan2(B2.row(i).dot(V.row(i)),B1.row(i).dot(V.row(i)));

    // rotate
    a += (A.size() == 1) ? A(0) : A(i);

    // move it back to global coordinates
    RV.row(i) = norm*cos(a) * B1.row(i) + norm*sin(a) * B2.row(i);
  }

  return RV;
}
Example #13
// Fit a polynomial.
// Adapted from
// https://github.com/JuliaMath/Polynomials.jl/blob/master/src/Polynomials.jl#L676-L716
Eigen::VectorXd polyfit(Eigen::VectorXd xvals, Eigen::VectorXd yvals, int order)
{
  assert(xvals.size() == yvals.size());
  assert(order >= 1 && order <= xvals.size() - 1);
  Eigen::MatrixXd A(xvals.size(), order + 1);

  for (int i = 0; i < xvals.size(); i++)
  {
    A(i, 0) = 1.0;
  }

  for (int j = 0; j < xvals.size(); j++)
  {
    for (int i = 0; i < order; i++)
    {
      A(j, i + 1) = A(j, i) * xvals(j);
    }
  }

  auto Q = A.householderQr();
  // Store the solve result in a concrete VectorXd (rather than `auto`) so the
  // expression is evaluated while A and yvals are still in scope.
  Eigen::VectorXd result = Q.solve(yvals);
  return result;
}
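A fitted polynomial like the one returned by polyfit is usually paired with a small evaluation helper. A minimal sketch (polyeval is a hypothetical name, not part of the original code; it assumes the same coefficient layout, lowest degree first):

// Hypothetical companion to polyfit: evaluate the polynomial at x using
// Horner's scheme, with coefficients stored lowest degree first.
double polyeval(const Eigen::VectorXd& coeffs, double x)
{
  double result = 0.0;
  for (Eigen::Index i = coeffs.size() - 1; i >= 0; --i)
    result = result * x + coeffs(i);
  return result;
}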
Example #14
void display()
{
  using namespace Eigen;
  using namespace igl;
  using namespace std;

  if(!trackball_on && tot_num_samples < 10000)
  {
    if(S.size() == 0)
    {
      S.resize(V.rows());
      S.setZero();
    }
    VectorXd Si;
    const int num_samples = 20;
    ambient_occlusion(ei,V,N,num_samples,Si);
    S *= (double)tot_num_samples;
    S += Si*(double)num_samples;
    tot_num_samples += num_samples;
    S /= (double)tot_num_samples;
  }

  // Convert to 1-intensity
  C.conservativeResize(S.rows(),3);
  if(ao_on)
  {
    C<<S,S,S;
    C.array() = (1.0-ao_factor*C.array());
  }else
  {
    C.setConstant(1.0);
  }
  if(ao_normalize)
  {
    C.col(0) *= ((double)C.rows())/C.col(0).sum();
    C.col(1) *= ((double)C.rows())/C.col(1).sum();
    C.col(2) *= ((double)C.rows())/C.col(2).sum();
  }
  C.col(0) *= color(0);
  C.col(1) *= color(1);
  C.col(2) *= color(2);

  glClearColor(back[0],back[1],back[2],0);
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
  // All smooth points
  glEnable( GL_POINT_SMOOTH );

  glDisable(GL_LIGHTING);
  if(lights_on)
  {
    lights();
  }
  push_scene();
  glEnable(GL_DEPTH_TEST);
  glDepthFunc(GL_LEQUAL);
  glEnable(GL_NORMALIZE);
  glEnable(GL_COLOR_MATERIAL);
  glColorMaterial(GL_FRONT_AND_BACK,GL_AMBIENT_AND_DIFFUSE);
  push_object();

  // Draw the model
  // Set material properties
  glEnable(GL_COLOR_MATERIAL);
  draw_mesh(V,F,N,C);

  pop_object();

  // Draw a nice floor
  glPushMatrix();
  const double floor_offset =
    -2./bbd*(V.col(1).maxCoeff()-mid(1));
  glTranslated(0,floor_offset,0);
  const float GREY[4] = {0.5,0.5,0.6,1.0};
  const float DARK_GREY[4] = {0.2,0.2,0.3,1.0};
  draw_floor(GREY,DARK_GREY);
  glPopMatrix();

  pop_scene();

  report_gl_error();

  TwDraw();
  glutSwapBuffers();
  glutPostRedisplay();
}
Example #15
//==============================================================================
TEST(World, Cloning)
{
  // Create a list of skel files to test with
  std::vector<std::string> fileList;
  fileList.push_back(DART_DATA_PATH"skel/test/chainwhipa.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/single_pendulum.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/single_pendulum_euler_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/single_pendulum_ball_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/double_pendulum.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/double_pendulum_euler_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/double_pendulum_ball_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/serial_chain_revolute_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/serial_chain_eulerxyz_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/serial_chain_ball_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/serial_chain_ball_joint_20.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/serial_chain_ball_joint_40.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/simple_tree_structure.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/simple_tree_structure_euler_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/simple_tree_structure_ball_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/tree_structure.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/tree_structure_euler_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/test/tree_structure_ball_joint.skel");
  fileList.push_back(DART_DATA_PATH"skel/fullbody1.skel");

  std::vector<dart::simulation::WorldPtr> worlds;
  for(size_t i=0; i<fileList.size(); ++i)
    worlds.push_back(dart::utils::SkelParser::readWorld(fileList[i]));

  for(size_t i=0; i<worlds.size(); ++i)
  {
    dart::simulation::WorldPtr original = worlds[i];
    std::vector<dart::simulation::WorldPtr> clones;
    clones.push_back(original);
    for(size_t j=1; j<5; ++j)
      clones.push_back(clones[j-1]->clone());

    // Make sure all the Skeleton states match at the start
    // TODO(MXG): This should be removed once state also gets copied over during a cloning
    for(size_t j=1; j<clones.size(); ++j)
    {
      for(size_t k=0; k<original->getNumSkeletons(); ++k)
      {
        dart::dynamics::SkeletonPtr skel = original->getSkeleton(k);
        dart::dynamics::SkeletonPtr clone = clones[j]->getSkeleton(k);

        clone->setPositions(skel->getPositions());
        clone->setVelocities(skel->getVelocities());
        clone->setAccelerations(skel->getAccelerations());
        clone->setForces(skel->getForces());
      }
    }

#ifndef NDEBUG // Debug mode
    size_t numIterations = 3;
#else
    size_t numIterations = 500;
#endif

    for(size_t j=0; j<numIterations; ++j)
    {
      for(size_t k=0; k<original->getNumSkeletons(); ++k)
      {
        dart::dynamics::SkeletonPtr skel = original->getSkeleton(k);

        // Generate a random command vector
        Eigen::VectorXd commands = skel->getCommands();
        for(int q=0; q<commands.size(); ++q)
          commands[q] = random(-0.1, 0.1);

        // Assign the command vector to each clone of the kth skeleton
        for(size_t c=0; c<clones.size(); ++c)
        {
          dart::dynamics::SkeletonPtr skelClone = clones[c]->getSkeleton(k);
          skelClone->setCommands(commands);
        }
      }

      // Step each clone forward
      for(size_t c=0; c<clones.size(); ++c)
        clones[c]->step(false);
    }

    for(size_t c=0; c<clones.size(); ++c)
    {
      for(size_t k=0; k<original->getNumSkeletons(); ++k)
      {
        dart::dynamics::SkeletonPtr skel = original->getSkeleton(k);
        dart::dynamics::SkeletonPtr clone = clones[c]->getSkeleton(k);

        EXPECT_TRUE( equals(skel->getPositions(), clone->getPositions(), 0));
        EXPECT_TRUE( equals(skel->getVelocities(), clone->getVelocities(), 0));
        EXPECT_TRUE( equals(skel->getAccelerations(), clone->getAccelerations(), 0));
        EXPECT_TRUE( equals(skel->getForces(), clone->getForces(), 0));
      }
    }
  }
}
Example #16
void TensorJTree::LearnSpectralParameters()
{
	for (int i = 0; i < node_list->size(); ++i)
	{
		node_list->at(i)->MarkMultModes();
		FindObservationPartitions(i);
	}

	for (int i = 0; i < node_list->size(); ++i)
	{
		node_list->at(i)->MarkObservableRepresentation();
	}

	for (int i = 0; i < node_list->size(); ++i)
	{
		cout << i;
		cout << "\n";
		node_list->at(i)->ComputeObservableRepresentation();
	}

	for (int i = 0; i < node_list->size(); ++i)
	{
		node_list->at(i)->FinalizeObservableRepresentation();
	}


	for (int i = 0; i < node_list->size(); ++i)
	{
		cout << i;
		cout << "\n";
		vector<double> big_forward_vec;
	//	big_forward_vec.reserve(10000);
		Matrix big_back_matrix;
	//	big_back_matrix.Reserve(10000);  	

		bool invModesExist = node_list->at(i)->InvModesExists();
		
		if (invModesExist)
		{
			vector<Tensor*>& forward_list = node_list->at(i)->GetForwardList();
			vector<Tensor*>& extra_list = node_list->at(i)->GetExtraList();

			Tensor& X = node_list->at(i)->GetTransformTensor().GetTensor();
			vector<int> result_dims = X.Dims();
			assert(forward_list.size() == extra_list.size());
		
			vector<int> inv_group_modes =  node_list->at(i)->GetInvModeGroup();
			vector<int> inv_modes = node_list->at(i)->GetInvModes();

			for (int j = 0; j < forward_list.size(); ++j)
			{
				Tensor& forward_tensor = *(forward_list[j]);
				Tensor& back_tensor = *(extra_list[j]);
	
				if (j == 0)
				{
					Tensor::CreateLinearSystem(big_forward_vec, big_back_matrix, X, back_tensor, forward_tensor, inv_group_modes, inv_modes); 
				}
				else
				{
					vector<double> forward_vec;
					Matrix back_matrix;

					Tensor::CreateLinearSystem(forward_vec, back_matrix, X, back_tensor, forward_tensor, inv_group_modes, inv_modes); 

					VectorPlus::Concat(big_forward_vec, forward_vec);
					big_back_matrix.MatrixConcat(back_matrix);
				}
			}

			Eigen::MatrixXd A(big_back_matrix.Dim(0), big_back_matrix.Dim(1));
			for (int j = 0; j < big_back_matrix.Dim(0); ++j)
			{
				for (int k = 0; k < big_back_matrix.Dim(1); ++k)
				{
					A(j,k) = big_back_matrix.At(j,k);
				}
			}

			Eigen::VectorXd b(big_forward_vec.size());
			for (int j = 0; j < big_forward_vec.size(); ++j)
			{
				b(j) = big_forward_vec.at(j);
			}
			//cout << "yee";
			Eigen::MatrixXd newA = A.transpose() * A;
			Eigen::MatrixXd newb = A.transpose() * b;
			Eigen::VectorXd res = newA.ldlt().solve(newb);
			//cout << "yeeldlt solved";
			//Eigen::VectorXd res = A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b);
			//cout << "yeesvdsolved";
			// solve the linear system here
			Tensor result_tensor(result_dims);
			for (int j = 0; j < res.size(); ++j)
			{
				result_tensor.Set(j, res(j));
			}
			
			Tensor& curr_result = node_list->at(i)->GetTransformTensor().GetTensor();
		//	assert(Tensor::Diff(result_tensor, curr_result) < .0001);
			node_list->at(i)->SetTransformTensor(result_tensor);
		}
	}

	// average by computing template and see what happens
	// hack it so we change the template
/*	template_to_cliques = new vector<vector<int>*>();
	for (int i = 0; i < NumCliques(); ++i)
	{
		template_to_cliques->push_back(new vector<int>(1,i));
	} */

	/*for (int i = 0; i < template_to_cliques->size(); ++i)
	{
		vector<int>& i_cliques = *(template_to_cliques->at(i));

		vector<double> big_forward_vec;
		Matrix big_back_matrix;
		vector<int> result_dims;

		bool invModesExist = node_list->at(i_cliques[0])->InvModesExists();
		if (invModesExist)
		{
			for (int j = 0; j < i_cliques.size(); ++j)
			{
				if (node_list->at(i_cliques[0])->is_invalid())
					continue;

				Tensor& forward_tensor = node_list->at(i_cliques[j])->GetForwardTensor();
				Tensor& back_tensor = node_list->at(i_cliques[j])->GetUExtraTensor();
				Tensor& X = node_list->at(i_cliques[j])->GetTransformTensor().GetTensor();
			
				vector<int>& inv_group_modes =  node_list->at(i_cliques[j])->GetInvModeGroup();
				vector<int>& inv_modes = node_list->at(i_cliques[j])->GetInvModes();
				if (j == 0)
				{
					Tensor::CreateLinearSystem(big_forward_vec, big_back_matrix, X, back_tensor, forward_tensor, inv_group_modes, inv_modes); 
					result_dims = X.Dims();
				}
				else
				{
					vector<double> forward_vec;
					Matrix back_matrix;

					Tensor::CreateLinearSystem(forward_vec, back_matrix, X, back_tensor, forward_tensor, inv_group_modes, inv_modes); 

					VectorPlus::Concat(big_forward_vec, forward_vec);
					big_back_matrix.MatrixConcat(back_matrix);
				}
			}

			Eigen::MatrixXd A(big_back_matrix.Dim(0), big_back_matrix.Dim(1));
			for (int i = 0; i < big_back_matrix.Dim(0); ++i)
			{
				for (int j = 0; j < big_back_matrix.Dim(1); ++j)
				{
					A(i,j) = big_back_matrix.At(i,j);
				}
			}

			Eigen::VectorXd b(big_forward_vec.size());
			for (int i = 0; i < big_forward_vec.size(); ++i)
			{
				b(i) = big_forward_vec.at(i);
			}

			Eigen::VectorXd res = A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b);
			// solve the linear system here
			Tensor result_tensor(result_dims);
			for (int i = 0; i < res.size(); ++i)
			{
				result_tensor.Set(i, res(i));
			}
			
			Tensor& curr_result = node_list->at(i_cliques[0])->GetTransformTensor().GetTensor();

		//	assert(Tensor::Diff(result_tensor, curr_result) < .0001);
			for (int j = 0; j < i_cliques.size(); ++j)
			{
				node_list->at(i_cliques[j])->SetTransformTensor(result_tensor);
			}
		}
	} */

	
/*	for (int i = 0; i < template_to_cliques->size(); ++i)
	{
		vector<int>& i_cliques = *(template_to_cliques->at(i));
		TensorCPT* avg_tensor =  new TensorCPT(node_list->at(i_cliques[0])->GetTransformTensor());
		for (int j = 1; j < i_cliques.size(); ++j)
		{
			TensorCPT::Add(*avg_tensor, node_list->at(i_cliques[j])->GetTransformTensor());
		}
		avg_tensor->Divide(i_cliques.size());

		for (int j = 0; j < i_cliques.size(); ++j)
		{
			node_list->at(i_cliques[j])->SetTransformTensor(avg_tensor->GetTensor());
		}
		delete(avg_tensor);
	} */
}
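The solve inside the loop above forms the normal equations of a linear least-squares problem and factors them with LDLT; a sketch of what newA.ldlt().solve(newb) computes:

\min_x \|Ax - b\|_2^2 \;\Longrightarrow\; (A^{\top}A)\,x = A^{\top}b

The commented-out jacobiSvd call is the slower but numerically more robust alternative when A^T A is ill-conditioned.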
Example #17
void TensorJTree::LearnTemplateSpectralParameters()
{
	for (int i = 0; i < node_list->size(); ++i)
	{
		node_list->at(i)->MarkMultModes();
		FindObservationPartitions(i);
	}

	for (int i = 0; i < node_list->size(); ++i)
	{
		node_list->at(i)->MarkObservableRepresentation();
	}

	for (int i = 0; i < node_list->size(); ++i)
	{
		cout << i;
		cout << "\n";
		node_list->at(i)->ComputeObservableRepresentation();
	}
		cout << "yeee";
	cout << "\n";
	for (int i = 0; i < node_list->size(); ++i)
	{
				cout << i;
		cout << "\n";
		node_list->at(i)->FinalizeTemplateObservableRepresentation();
	}

		cout << "yeee";
	cout << "\n";
/*	template_to_cliques = new vector<vector<int>*>();
	for (int i = 0; i < NumCliques(); ++i)
	{
		template_to_cliques->push_back(new vector<int>(1,i));
	} */
	cout << "yeee";
	cout << "\n";
	for (int i = 0; i < template_to_cliques->size(); ++i)
	{
		vector<int>& i_cliques = *(template_to_cliques->at(i));

		vector<double> big_forward_vec;
		Matrix big_back_matrix;
		vector<int> result_dims;

		bool invModesExist = node_list->at(i_cliques[0])->InvModesExists();
		if (invModesExist)
		{

			for (int j = 0; j < i_cliques.size(); ++j)
			{
				cout << j;
				cout << "\n";
				if (node_list->at(i_cliques[0])->is_invalid())
				{
					assert(0);
					continue;
				}


				int index = i_cliques[j];
				vector<Tensor*>& forward_list = node_list->at(index)->GetForwardList();
				vector<Tensor*>& extra_list = node_list->at(index)->GetExtraList();

				Tensor& X = node_list->at(index)->GetTransformTensor().GetTensor();

				if (j == 0)
				{
					result_dims = X.Dims();
				}

				assert(forward_list.size() == extra_list.size());
		
				vector<int> inv_group_modes =  node_list->at(index)->GetInvModeGroup();
				vector<int> inv_modes = node_list->at(index)->GetInvModes();

				for (int k = 0; k < forward_list.size(); ++k)
				{

					Tensor& forward_tensor = *(forward_list[k]);
					Tensor& back_tensor = *(extra_list[k]);
	
					if (j == 0 && k == 0)
					{
						Tensor::CreateLinearSystem(big_forward_vec, big_back_matrix, X, back_tensor, forward_tensor, inv_group_modes, inv_modes); 
					}
					else
					{
						vector<double> forward_vec;
						Matrix back_matrix;

						cout << "yeebeforesystem";
						Tensor::CreateLinearSystem(forward_vec, back_matrix, X, back_tensor, forward_tensor, inv_group_modes, inv_modes); 
						cout << "yeeaftersystem";
						VectorPlus::Concat(big_forward_vec, forward_vec);
						big_back_matrix.MatrixConcat(back_matrix);
						cout << "yeeafterconcat";
					}
				}
				node_list->at(index)->DeleteForwardList();
				node_list->at(index)->DeleteExtraList();
			}
			cout << big_back_matrix.Dim(0);
			cout << "\n";
			cout << big_back_matrix.Dim(1);
			cout << "\n";
			Eigen::MatrixXd A(big_back_matrix.Dim(0), big_back_matrix.Dim(1));

			cout << "allocatedbigone";
			for (int j = 0; j < big_back_matrix.Dim(0); ++j)
			{
				for (int k = 0; k < big_back_matrix.Dim(1); ++k)
				{
					(A)(j,k) = big_back_matrix.At(j,k);
				}
			}

			Eigen::VectorXd b(big_forward_vec.size());
			for (int j = 0; j < big_forward_vec.size(); ++j)
			{
				b(j) = big_forward_vec.at(j);
			}
			cout << "before solve";
			cout << "\n";
			Eigen::MatrixXd newA = A.transpose() * (A);
			Eigen::MatrixXd newb = A.transpose() * b;
			Eigen::VectorXd res = newA.ldlt().solve(newb);
			cout << "after solve";
			cout << "\n";		
			// solve the linear system here
			Tensor result_tensor(result_dims);
			for (int j = 0; j < res.size(); ++j)
			{
				result_tensor.Set(j, res(j));
			}
			
			Tensor& curr_result = node_list->at(i_cliques[0])->GetTransformTensor().GetTensor();

		//	assert(Tensor::Diff(result_tensor, curr_result) < .0001);
			for (int j = 0; j < i_cliques.size(); ++j)
			{
				node_list->at(i_cliques[j])->SetTransformTensor(result_tensor);
			}
		}
	}
}
Example #18
TEST(McmcDiagEMetric, gradients) {
  rng_t base_rng(0);
  
  Eigen::VectorXd q = Eigen::VectorXd::Ones(11);
  
  stan::mcmc::diag_e_point z(q.size());
  z.q = q;
  z.p.setOnes();
  
  std::fstream data_stream(std::string("").c_str(), std::fstream::in);
  stan::io::dump data_var_context(data_stream);
  data_stream.close();

  std::stringstream model_output, metric_output;
  
  funnel_model_namespace::funnel_model model(data_var_context, &model_output);
  
  stan::mcmc::diag_e_metric<funnel_model_namespace::funnel_model, rng_t> metric(model, &metric_output);
  
  double epsilon = 1e-6;
  
  metric.update(z);
  
  Eigen::VectorXd g1 = metric.dtau_dq(z);
  
  for (int i = 0; i < z.q.size(); ++i) {
    
    double delta = 0;
    
    z.q(i) += epsilon;
    metric.update(z);
    delta += metric.tau(z);
    
    z.q(i) -= 2 * epsilon;
    metric.update(z);
    delta -= metric.tau(z);
    
    z.q(i) += epsilon;
    metric.update(z);
    
    delta /= 2 * epsilon;
    
    EXPECT_NEAR(delta, g1(i), epsilon);
    
  }
  
  Eigen::VectorXd g2 = metric.dtau_dp(z);
  
  for (int i = 0; i < z.q.size(); ++i) {
    
    double delta = 0;
    
    z.p(i) += epsilon;
    delta += metric.tau(z);
    
    z.p(i) -= 2 * epsilon;
    delta -= metric.tau(z);
    
    z.p(i) += epsilon;
    
    delta /= 2 * epsilon;
    
    EXPECT_NEAR(delta, g2(i), epsilon);
    
  }
  
  Eigen::VectorXd g3 = metric.dphi_dq(z);
  
  for (int i = 0; i < z.q.size(); ++i) {
    
    double delta = 0;
    
    z.q(i) += epsilon;
    metric.update(z);
    delta += metric.phi(z);
    
    z.q(i) -= 2 * epsilon;
    metric.update(z);
    delta -= metric.phi(z);
    
    z.q(i) += epsilon;
    metric.update(z);
    
    delta /= 2 * epsilon;
    
    EXPECT_NEAR(delta, g3(i), epsilon);
    
  }

  EXPECT_EQ("", model_output.str());
  EXPECT_EQ("", metric_output.str());
}
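Each loop above checks an analytic gradient component against a central finite difference with step epsilon; the quantity accumulated in delta is

\frac{\partial \tau}{\partial q_i} \approx \frac{\tau(q + \epsilon e_i) - \tau(q - \epsilon e_i)}{2\epsilon}

(and likewise for dtau_dp and dphi_dq).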
Example #19
TEST(McmcUnitENuts, tree_boundary_test) {
  rng_t base_rng(4839294);

  stan::mcmc::unit_e_point z_init(3);
  z_init.q(0) = 1;
  z_init.q(1) = -1;
  z_init.q(2) = 1;
  z_init.p(0) = -1;
  z_init.p(1) = 1;
  z_init.p(2) = -1;

  std::stringstream output;
  stan::interface_callbacks::writer::stream_writer writer(output);
  std::stringstream error_stream;
  stan::interface_callbacks::writer::stream_writer error_writer(error_stream);

  std::fstream empty_stream("", std::fstream::in);
  stan::io::dump data_var_context(empty_stream);

  typedef gauss3D_model_namespace::gauss3D_model model_t;
  model_t model(data_var_context);

  // Compute expected tree boundaries
  typedef stan::mcmc::unit_e_metric<model_t, rng_t> metric_t;
  metric_t metric(model);

  stan::mcmc::expl_leapfrog<metric_t> unit_e_integrator;
  double epsilon = 0.1;

  stan::mcmc::unit_e_point z_test = z_init;
  metric.init(z_test, writer, error_writer);

  unit_e_integrator.evolve(z_test, metric, epsilon, writer, error_writer);
  Eigen::VectorXd p_sharp_forward_1 = metric.dtau_dp(z_test);

  unit_e_integrator.evolve(z_test, metric, epsilon, writer, error_writer);
  Eigen::VectorXd p_sharp_forward_2 = metric.dtau_dp(z_test);

  unit_e_integrator.evolve(z_test, metric, epsilon, writer, error_writer);
  unit_e_integrator.evolve(z_test, metric, epsilon, writer, error_writer);
  Eigen::VectorXd p_sharp_forward_3 = metric.dtau_dp(z_test);

  unit_e_integrator.evolve(z_test, metric, epsilon, writer, error_writer);
  unit_e_integrator.evolve(z_test, metric, epsilon, writer, error_writer);
  unit_e_integrator.evolve(z_test, metric, epsilon, writer, error_writer);
  unit_e_integrator.evolve(z_test, metric, epsilon, writer, error_writer);
  Eigen::VectorXd p_sharp_forward_4 = metric.dtau_dp(z_test);

  z_test = z_init;
  metric.init(z_test, writer, error_writer);

  unit_e_integrator.evolve(z_test, metric, -epsilon, writer, error_writer);
  Eigen::VectorXd p_sharp_backward_1 = metric.dtau_dp(z_test);

  unit_e_integrator.evolve(z_test, metric, -epsilon, writer, error_writer);
  Eigen::VectorXd p_sharp_backward_2 = metric.dtau_dp(z_test);

  unit_e_integrator.evolve(z_test, metric, -epsilon, writer, error_writer);
  unit_e_integrator.evolve(z_test, metric, -epsilon, writer, error_writer);
  Eigen::VectorXd p_sharp_backward_3 = metric.dtau_dp(z_test);

  unit_e_integrator.evolve(z_test, metric, -epsilon, writer, error_writer);
  unit_e_integrator.evolve(z_test, metric, -epsilon, writer, error_writer);
  unit_e_integrator.evolve(z_test, metric, -epsilon, writer, error_writer);
  unit_e_integrator.evolve(z_test, metric, -epsilon, writer, error_writer);
  Eigen::VectorXd p_sharp_backward_4 = metric.dtau_dp(z_test);

  // Compare the expected tree boundaries with those dynamically generated by NUTS
  stan::mcmc::unit_e_nuts<model_t, rng_t> sampler(model, base_rng);

  sampler.set_nominal_stepsize(epsilon);
  sampler.set_stepsize_jitter(0);
  sampler.sample_stepsize();

  stan::mcmc::ps_point z_propose = z_init;

  Eigen::VectorXd p_sharp_left = Eigen::VectorXd::Zero(z_init.p.size());
  Eigen::VectorXd p_sharp_right = Eigen::VectorXd::Zero(z_init.p.size());
  Eigen::VectorXd rho = z_init.p;
  double log_sum_weight = -std::numeric_limits<double>::infinity();

  double H0 = -0.1;
  int n_leapfrog = 0;
  double sum_metro_prob = 0;

  // Depth 0 forward
  sampler.z() = z_init;
  sampler.init_hamiltonian(writer, error_writer);
  sampler.build_tree(0, z_propose,
                     p_sharp_left, p_sharp_right, rho,
                     H0, 1, n_leapfrog, log_sum_weight,
                     sum_metro_prob,
                     writer, error_writer);

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_forward_1(n), p_sharp_left(n));

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_forward_1(n), p_sharp_right(n));

  // Depth 1 forward
  sampler.z() = z_init;
  sampler.init_hamiltonian(writer, error_writer);
  sampler.build_tree(1, z_propose,
                     p_sharp_left, p_sharp_right, rho,
                     H0, 1, n_leapfrog, log_sum_weight,
                     sum_metro_prob,
                     writer, error_writer);

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_forward_1(n), p_sharp_left(n));

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_forward_2(n), p_sharp_right(n));

  // Depth 2 forward
  sampler.z() = z_init;
  sampler.init_hamiltonian(writer, error_writer);
  sampler.build_tree(2, z_propose,
                     p_sharp_left, p_sharp_right, rho,
                     H0, 1, n_leapfrog, log_sum_weight,
                     sum_metro_prob,
                     writer, error_writer);

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_forward_1(n), p_sharp_left(n));

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_forward_3(n), p_sharp_right(n));

  // Depth 3 forward
  sampler.z() = z_init;
  sampler.init_hamiltonian(writer, error_writer);
  sampler.build_tree(3, z_propose,
                     p_sharp_left, p_sharp_right, rho,
                     H0, 1, n_leapfrog, log_sum_weight,
                     sum_metro_prob,
                     writer, error_writer);

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_forward_1(n), p_sharp_left(n));

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_forward_4(n), p_sharp_right(n));

  // Depth 0 backward
  sampler.z() = z_init;
  sampler.init_hamiltonian(writer, error_writer);
  sampler.build_tree(0, z_propose,
                     p_sharp_left, p_sharp_right, rho,
                     H0, -1, n_leapfrog, log_sum_weight,
                     sum_metro_prob,
                     writer, error_writer);

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_backward_1(n), p_sharp_left(n));

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_backward_1(n), p_sharp_right(n));

  // Depth 1 backward
  sampler.z() = z_init;
  sampler.init_hamiltonian(writer, error_writer);
  sampler.build_tree(1, z_propose,
                     p_sharp_left, p_sharp_right, rho,
                     H0, -1, n_leapfrog, log_sum_weight,
                     sum_metro_prob,
                     writer, error_writer);

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_backward_1(n), p_sharp_left(n));

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_backward_2(n), p_sharp_right(n));

  // Depth 2 backward
  sampler.z() = z_init;
  sampler.init_hamiltonian(writer, error_writer);
  sampler.build_tree(2, z_propose,
                     p_sharp_left, p_sharp_right, rho,
                     H0, -1, n_leapfrog, log_sum_weight,
                     sum_metro_prob,
                     writer, error_writer);

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_backward_1(n), p_sharp_left(n));

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_backward_3(n), p_sharp_right(n));

  // Depth 3 backward
  sampler.z() = z_init;
  sampler.init_hamiltonian(writer, error_writer);
  sampler.build_tree(3, z_propose,
                     p_sharp_left, p_sharp_right, rho,
                     H0, -1, n_leapfrog, log_sum_weight,
                     sum_metro_prob,
                     writer, error_writer);

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_backward_1(n), p_sharp_left(n));

  for (int n = 0; n < rho.size(); ++n)
    EXPECT_FLOAT_EQ(p_sharp_backward_4(n), p_sharp_right(n));
}
Example #20
void ba81NormalQuad::layer::setStructure(Eigen::ArrayBase<T1> &param,
					 Eigen::MatrixBase<T2> &gmean, Eigen::MatrixBase<T3> &gcov)
{
	abilitiesMap.clear();
	std::string str = string_snprintf("layer:");
	for (int ax=0; ax < gmean.rows(); ++ax) {
		if (!abilitiesMask[ax]) continue;
		abilitiesMap.push_back(ax);
		str += " ";
		str += quad->ig.factorNames[ax];
	}
	str += ":";

	itemsMap.clear();
	glItemsMap.resize(param.cols(), -1);
	for (int ix=0, lx=0; ix < param.cols(); ++ix) {
		if (!itemsMask[ix]) continue;
		itemsMap.push_back(ix);
		glItemsMap[ix] = lx++;
		str += string_snprintf(" %d", ix);
	}
	
	str += "\n";
	//mxLogBig(str);

	dataColumns.clear();
	dataColumns.reserve(numItems());
	totalOutcomes = 0;
	for (int ix=0; ix < numItems(); ++ix) {
		int outcomes = quad->ig.itemOutcomes[ itemsMap[ix] ];
		itemOutcomes.push_back(outcomes);
		cumItemOutcomes.push_back(totalOutcomes);
		totalOutcomes += outcomes;
		dataColumns.push_back(quad->ig.dataColumns[ itemsMap[ix] ]);
	}

	Eigen::VectorXd mean;
	Eigen::MatrixXd cov;
	globalToLocalDist(gmean, gcov, mean, cov);

	if (mean.size() == 0) {
		numSpecific = 0;
		primaryDims = 0;
		maxDims = 1;
		totalQuadPoints = 1;
		totalPrimaryPoints = 1;
		weightTableSize = 1;
		return;
	}

	numSpecific = 0;
	
	if (quad->ig.twotier) detectTwoTier(param, mean, cov);
	if (numSpecific) quad->hasBifactorStructure = true;

	primaryDims = cov.cols() - numSpecific;
	maxDims = primaryDims + (numSpecific? 1 : 0);

	totalQuadPoints = 1;
	for (int dx=0; dx < maxDims; dx++) {
		totalQuadPoints *= quad->gridSize;
	}

	totalPrimaryPoints = totalQuadPoints;
	weightTableSize = totalQuadPoints;

	if (numSpecific) {
		totalPrimaryPoints /= quad->gridSize;
		weightTableSize *= numSpecific;
	}
}
Example #21
//==============================================================================
void Function::evalGradient(const Eigen::VectorXd& _x, Eigen::VectorXd& _grad)
{
  Eigen::Map<Eigen::VectorXd> tmpGrad(_grad.data(), _grad.size());
  evalGradient(_x, tmpGrad);
}
Example #22
Eigen::MatrixXd KalmanFilter::Propagate(Eigen::VectorXd x_hat_plus, Eigen::MatrixXd P_plus, double v_m, double w_m, Eigen::MatrixXd Q, double dt)
{
	int n = x_hat_plus.size();
	
	double ori = x_hat_plus(2);

		
	Eigen::VectorXd x_hat_min(n);
	Eigen::MatrixXd P_min(n,n); 
	
	Eigen::MatrixXd Set(n,n+1);		//Set of state and covariance
	
	Eigen::MatrixXd Phi_R(3,3);
	Eigen::MatrixXd G(3,2);
	
	Eigen::MatrixXd tempTrans;
	
	//Propagate state (Robot and Landmark, no change in Landmark)
	x_hat_min.head(3) << v_m*cos(ori),
				v_m*sin(ori),
				w_m;
	
	x_hat_min.head(3) = x_hat_plus.head(3) + dt*x_hat_min.head(3);	
	x_hat_min.tail(n-3) = x_hat_plus.tail(n-3);


	// Set up Phi_R and G
	Phi_R << 1, 0, -dt*v_m*sin(ori),
		0, 1, dt*v_m*cos(ori),
		0, 0, 1;
	
	G << -dt*cos(ori), 0,
		-dt*sin(ori), 0,
		0, -dt;
	

	// Propagate Covariance
	// P_RR
	P_min.block(0,0,3,3) = Phi_R * P_plus.block(0,0,3,3) * Phi_R.transpose() + G * Q * G.transpose();

	// P_RL
	P_min.block(0,3,3,n-3) = Phi_R * P_plus.block(0,3,3,n-3);

	// P_LR
	tempTrans = P_min.block(0,3,3,n-3).transpose();
	P_min.block(3,0,n-3,3) = tempTrans;

	// P_LL are not affected by propagation.
	P_min.block(3,3,n-3,n-3) = P_plus.block(3,3,n-3, n-3);

	// Make sure the matrix is symmetric, may skip this option
	tempTrans = 0.5 * (P_min + P_min.transpose());
	P_min = tempTrans;	

	// Put State vector and Covariance into one matrix and return it.
	// We can parse these out in the main function.
	Set.block(0,0,n,1) = x_hat_min;
	Set.block(0,1,n,n) = P_min;

	return Set;
}
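The code above is the prediction step of an EKF with a unicycle robot state [x, y, θ] followed by landmark states; a sketch of the equations it implements, with measured speed v_m and turn rate w_m:

x^-_R = x^+_R + \Delta t\,[\,v_m\cos\theta,\; v_m\sin\theta,\; w_m\,]^{\top}
P^-_{RR} = \Phi_R P^+_{RR} \Phi_R^{\top} + G\,Q\,G^{\top}, \qquad P^-_{RL} = \Phi_R P^+_{RL}, \qquad P^-_{LL} = P^+_{LL}

The final symmetrization 0.5*(P + P^T) only guards against accumulated numerical asymmetry.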
Example #23
inline std::vector<double> eigenVectorToStdVector(const Eigen::VectorXd& v) {
    std::vector<double> m(v.size());
    for (int i = 0; i < v.size(); i++)
        m[i] = v[i];
    return m;
}
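The reverse conversion is often needed next to this helper; a minimal sketch using Eigen::Map (the name stdVectorToEigenVector is hypothetical, and it assumes <Eigen/Dense> and <vector> are already included as in the original):

// Hypothetical companion: copy a std::vector<double> into an Eigen::VectorXd.
inline Eigen::VectorXd stdVectorToEigenVector(const std::vector<double>& v) {
    return Eigen::Map<const Eigen::VectorXd>(v.data(), v.size());
}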
Example #24
void Input::initMolecule() {
  // Gather information necessary to build molecule_
  // 1. number of atomic centers
  int nuclei = int(geometry_.size() / 4);
  // 2. position and charges of atomic centers
  Eigen::Matrix3Xd centers = Eigen::Matrix3Xd::Zero(3, nuclei);
  Eigen::VectorXd charges = Eigen::VectorXd::Zero(nuclei);
  int j = 0;
  for (int i = 0; i < nuclei; ++i) {
    centers.col(i) =
        (Eigen::Vector3d() << geometry_[j], geometry_[j + 1], geometry_[j + 2])
            .finished();
    charges(i) = geometry_[j + 3];
    j += 4;
  }
  // 3. list of atoms and list of spheres
  std::vector<Atom> radiiSet;
  std::vector<Atom> atoms;
  atoms.reserve(nuclei);
  // FIXME Code duplication in function initMolecule in interface/Meddle.cpp
  tie(radiiSetName_, radiiSet) = utils::bootstrapRadiiSet().create(radiiSet_);
  for (int i = 0; i < charges.size(); ++i) {
    int index = int(charges(i)) - 1;
    atoms.push_back(radiiSet[index]);
    if (scaling_)
      atoms[i].radiusScaling = 1.2;
  }
  // Based on the creation mode (Implicit or Atoms)
  // the spheres list might need postprocessing
  if (mode_ == "IMPLICIT" || mode_ == "ATOMS") {
    for (int i = 0; i < charges.size(); ++i) {
      // Convert to Bohr and multiply by scaling factor (alpha)
      double radius = atoms[i].radius * angstromToBohr() * atoms[i].radiusScaling;
      spheres_.push_back(Sphere(centers.col(i), radius));
    }
    if (mode_ == "ATOMS") {
      // Loop over the atomsInput array to get which atoms will have a user-given
      // radius
      for (size_t i = 0; i < atoms_.size(); ++i) {
        int index =
            atoms_[i] - 1; // -1 to go from human readable to machine readable
        // Put the new Sphere in place of the implicit-generated one
        spheres_[index] = Sphere(centers.col(index), radii_[i]);
      }
    }
  }

  // 4. masses
  Eigen::VectorXd masses = Eigen::VectorXd::Zero(nuclei);
  for (int i = 0; i < masses.size(); ++i) {
    masses(i) = atoms[i].mass;
  }
  // 5. molecular point group
  // FIXME currently hardcoded to C1

  // OK, now get molecule_
  molecule_ = Molecule(nuclei, charges, masses, centers, atoms, spheres_);
  // Check that all atoms have a radius attached
  std::vector<Atom>::const_iterator res =
      std::find_if(atoms.begin(), atoms.end(), invalid);
  if (res != atoms.end()) {
    std::cout << molecule_ << std::endl;
    PCMSOLVER_ERROR("Some atoms do not have a radius attached. Please specify a "
                    "radius for all atoms (see "
                    "http://pcmsolver.readthedocs.org/en/latest/users/input.html)!");
  }
}
Example #25
// more general version--arbitrary number of joints
bool TrajActionServer::update_trajectory(double traj_clock, trajectory_msgs::JointTrajectory trajectory, Eigen::VectorXd qvec_prev, 
        int &isegment, Eigen::VectorXd &qvec_new) {
    
    trajectory_msgs::JointTrajectoryPoint trajectory_point_from, trajectory_point_to;
    int njnts = qvec_prev.size();
    cout<<"njnts for qvec_prev: "<<njnts<<endl;
    Eigen::VectorXd qvec, qvec_to, delta_qvec, dqvec;
    int nsegs = trajectory.points.size() - 1;
    ROS_INFO("update_trajectory: nsegs = %d, isegment = %d",nsegs,isegment);
    double t_subgoal;
    //cout<<"traj_clock = "<<traj_clock<<endl;
    if (isegment < nsegs) {
        trajectory_point_to = trajectory.points[isegment + 1];
        t_subgoal = trajectory_point_to.time_from_start.toSec();
        cout<<"iseg = "<<isegment<<"; t_subgoal = "<<t_subgoal<<endl;
    } else {
        cout << "reached end of last segment" << endl;
        trajectory_point_to = trajectory.points[nsegs];
        t_subgoal = trajectory_point_to.time_from_start.toSec();
        
        for (int i = 0; i < njnts; i++) {
            qvec_new[i] = trajectory_point_to.positions[i];
        }
        cout << "final time: " << t_subgoal << endl;
        return false;
    }

    cout<<"iseg = "<<isegment<<"; t_subgoal = "<<t_subgoal<<endl;
    while ((t_subgoal < traj_clock)&&(isegment < nsegs)) {
        cout<<"loop: iseg = "<<isegment<<"; t_subgoal = "<<t_subgoal<<endl;
        isegment++;
        if (isegment > nsegs - 1) {
            //last point
            trajectory_point_to = trajectory.points[nsegs];
            cout<<"next traj pt #jnts = "<<trajectory_point_to.positions.size()<<endl;
            for (int i = 0; i < njnts; i++) {
                qvec_new[i] = trajectory_point_to.positions[i];
            }
            cout << "iseg>nsegs" << endl;
            return false;
        }

        trajectory_point_to = trajectory.points[isegment + 1];
        t_subgoal = trajectory_point_to.time_from_start.toSec();
    }
    //cout<<"t_subgoal = "<<t_subgoal<<endl;
    //here if have a valid segment:
    cout<<"njnts of trajectory_point_to: "<<trajectory_point_to.positions.size()<<endl;
    qvec_to.resize(njnts);
    for (int i = 0; i < njnts; i++) {
        qvec_to[i] = trajectory_point_to.positions[i];
    }
    delta_qvec.resize(njnts);
    delta_qvec = qvec_to - qvec_prev; //this far to go until next node;
    double delta_time = t_subgoal - traj_clock;
    if (delta_time < dt_traj) delta_time = dt_traj;
    dqvec.resize(njnts);
    dqvec = delta_qvec * dt_traj / delta_time;
    qvec_new = qvec_prev + dqvec;
    return true;
}
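The last few lines interpolate linearly between the previous command and the next trajectory node, advancing by one controller period per call; with delta_time clamped from below by dt_traj, the update is

q_{\text{new}} = q_{\text{prev}} + (q_{\text{to}} - q_{\text{prev}})\,\frac{dt_{\text{traj}}}{\max(t_{\text{subgoal}} - t_{\text{clock}},\, dt_{\text{traj}})}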
Example #26
//==============================================================================
bool GradientDescentSolver::solve()
{
  bool minimized = false;
  bool satisfied = false;

  std::shared_ptr<Problem> problem = mProperties.mProblem;
  if(nullptr == problem)
  {
    dtwarn << "[GradientDescentSolver::solve] Attempting to solve a nullptr "
           << "problem! We will return false.\n";
    return false;
  }

  double tol = std::abs(mProperties.mTolerance);
  double gamma = mGradientP.mStepSize;
  size_t dim = problem->getDimension();

  if(dim == 0)
  {
    problem->setOptimalSolution(Eigen::VectorXd());
    problem->setOptimumValue(0.0);
    return true;
  }

  Eigen::VectorXd x = problem->getInitialGuess();
  assert(x.size() == static_cast<int>(dim));

  Eigen::VectorXd lastx = x;
  Eigen::VectorXd dx(x.size());
  Eigen::VectorXd grad(x.size());

  mEqConstraintCostCache.resize(problem->getNumEqConstraints());
  mIneqConstraintCostCache.resize(problem->getNumIneqConstraints());

  mLastNumIterations = 0;
  size_t attemptCount = 0;
  do
  {
    size_t stepCount = 0;
    do
    {
      ++mLastNumIterations;

      // Perturb the configuration if we have reached an iteration where we are
      // supposed to perturb it.
      if(mGradientP.mPerturbationStep > 0 && stepCount > 0
         && stepCount%mGradientP.mPerturbationStep == 0)
      {
        dx = x; // Seed the configuration randomizer with the current configuration
        randomizeConfiguration(dx);

        // Step the current configuration towards the randomized configuration
        // proportionally to a randomized scaling factor
        double scale = mGradientP.mMaxPerturbationFactor*mDistribution(mMT);
        x += scale*(dx-x);
      }

      // Check if the equality constraints are satisfied
      satisfied = true;
      for(size_t i=0; i<problem->getNumEqConstraints(); ++i)
      {
        mEqConstraintCostCache[i] = problem->getEqConstraint(i)->eval(x);
        if(std::abs(mEqConstraintCostCache[i]) > tol)
          satisfied = false;
      }

      // Check if the inequality constraints are satisfied
      for(size_t i=0; i<problem->getNumIneqConstraints(); ++i)
      {
        mIneqConstraintCostCache[i] = problem->getIneqConstraint(i)->eval(x);
        if(mIneqConstraintCostCache[i] > std::abs(tol))
          satisfied = false;
      }

      Eigen::Map<Eigen::VectorXd> dxMap(dx.data(), dim);
      Eigen::Map<Eigen::VectorXd> gradMap(grad.data(), dim);
      // Compute the gradient of the objective, combined with the weighted
      // gradients of the softened constraints
      problem->getObjective()->evalGradient(x, dxMap);
      for(int i=0; i < static_cast<int>(problem->getNumEqConstraints()); ++i)
      {
        if(std::abs(mEqConstraintCostCache[i]) < tol)
          continue;

        problem->getEqConstraint(i)->evalGradient(x, gradMap);

        // Get the user-specified weight if available, otherwise use the default
        // weight value
        double weight = mGradientP.mEqConstraintWeights.size() > i?
              mGradientP.mEqConstraintWeights[i] :
              mGradientP.mDefaultConstraintWeight;

        // We treat the constraint function as though we are minimizing its
        // absolute value. We do not want to treat it as though we are
        // minimizing its square, because that could adversely affect the
        // curvature of its derivative.
        dx += weight * grad * math::sign(mEqConstraintCostCache[i]);
      }

      for(int i=0; i < static_cast<int>(problem->getNumIneqConstraints()); ++i)
      {
        if(mIneqConstraintCostCache[i] < tol)
          continue;

        problem->getIneqConstraint(i)->evalGradient(x, gradMap);

        // Get the user-specified weight if available, otherwise use the
        // default weight value
        double weight = mGradientP.mIneqConstraintWeights.size() > i?
              mGradientP.mIneqConstraintWeights[i] :
              mGradientP.mDefaultConstraintWeight;

        dx += weight * grad;
      }

      x -= gamma*dx;
      clampToBoundary(x);

      if((x-lastx).norm() < tol)
        minimized = true;
      else
        minimized = false;

      lastx = x;
      ++stepCount;

      if(nullptr != mProperties.mOutStream &&
         mProperties.mIterationsPerPrint > 0 &&
         stepCount%mProperties.mIterationsPerPrint == 0)
      {
        *mProperties.mOutStream
            << "[GradientDescentSolver] Progress (attempt #"
            << attemptCount << " | iteration #" << stepCount << ")\n"
            << "cost: " << problem->getObjective()->eval(x) << " | "
            << (minimized? "minimized | " : "not minimized | ")
            << (satisfied? "constraints satisfied | "
                         : "constraints unsatisfied | ")
            << "x: " << x.transpose() << "\n"
            << "grad: " << dx.transpose() << std::endl;
      }

      if(stepCount > mProperties.mNumMaxIterations)
        break;

    } while(!minimized || !satisfied);

    if(!minimized || !satisfied)
    {
      ++attemptCount;

      if(mGradientP.mMaxAttempts > 0 && attemptCount >= mGradientP.mMaxAttempts)
        break;

      if(attemptCount-1 < problem->getSeeds().size())
      {
        x = problem->getSeed(attemptCount-1);
      }
      else
      {
        randomizeConfiguration(x);
      }
    }

  } while(!minimized || !satisfied);

  mLastConfig = x;
  problem->setOptimalSolution(x);
  problem->setOptimumValue(problem->getObjective()->eval(x));

  return minimized && satisfied;
}
Example #27
void multiTaskRecursiveLinearEstimator::setOutputErrorStandardDeviation(const Eigen::VectorXd &sigma_oe_input)
{
    assert(sigma_oe.size() == m);
    assert(sigma_oe_input.size() == m);
    sigma_oe = sigma_oe_input;
}
Example #28
 double operator()(const Eigen::VectorXd & x1, const Eigen::VectorXd & x2) const {
   assert(x1.size() == _ell.size());
   double z = (x1 - x2).cwiseQuotient(_ell).squaredNorm();
   return _sf2 * exp(-0.5 * z);
 }
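This operator is a squared-exponential (RBF) covariance with per-dimension length scales _ell and signal variance _sf2; written out:

k(x_1, x_2) = \sigma_f^2 \exp\!\left(-\tfrac{1}{2}\sum_d \frac{(x_{1,d} - x_{2,d})^2}{\ell_d^2}\right)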
Example #29
//==============================================================================
void NullFunction::evalGradient(const Eigen::VectorXd& _x,
                                Eigen::Map<Eigen::VectorXd> _grad)
{
  _grad.resize(_x.size());
  _grad.setZero();
}
Example #30
void SubSystem::setParams(Eigen::VectorXd &xIn)
{
    assert(xIn.size() == psize);
    for (int i=0; i < psize; i++)
        pvals[i] = xIn[i];
}