bool toEigen(const yarp::sig::Vector & vec_yrp, Eigen::VectorXd & vec_eigen)
{
    // Match the destination size to the source before copying.
    if( vec_yrp.size() != vec_eigen.size() ) { vec_eigen.resize(vec_yrp.size()); }
    // memcpy returns its destination pointer, so this check only fails if the
    // destination buffer is null.
    if( memcpy(vec_eigen.data(),vec_yrp.data(),sizeof(double)*vec_eigen.size()) != NULL ) {
        return true;
    } else {
        return false;
    }
}
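A minimal usage sketch for the converter above, assuming the YARP and Eigen headers are on the include path; toEigenExample and the sample values are illustrative only.

#include <yarp/sig/Vector.h>
#include <Eigen/Dense>

void toEigenExample()
{
    // Fill a small YARP vector with placeholder values.
    yarp::sig::Vector vec_yrp(3);
    vec_yrp[0] = 1.0; vec_yrp[1] = 2.0; vec_yrp[2] = 3.0;

    // toEigen() resizes the destination to match before copying.
    Eigen::VectorXd vec_eigen;
    if (toEigen(vec_yrp, vec_eigen))
    {
        // vec_eigen now holds {1.0, 2.0, 3.0}
    }
}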
Example #2
	output fc_rnn::execute(input const& in)
	{
		// Set activation of input neurons
		auto const num_input = in.size();
		for(size_t n = 0; n < num_input; ++n)
		{
			vInput[n] = in[n];
		}

		// Summation for hidden neurons
		Eigen::VectorXd vHiddenSums =
			wmInput * vInput +
			wmHidden * vHidden;
		// Transfer function
		vHidden =
			evaluate(af_hidden, vHiddenSums.array());

		// TODO: Maybe should just store as a single vector?
		Eigen::VectorXd joined(input_layer_count() + hidden_count());
		joined << vInput, vHidden;
		Eigen::VectorXd vOutputSums =
			wmOutput * joined;
		Eigen::VectorXd vOutput =
			evaluate(af_output, vOutputSums.array());

		// Return the output values
		output out{ output_count() };
		std::copy(vOutput.data(), vOutput.data() + output_count(), out.begin());
		return out;
	}
void RigidBodies3DSobogusInterface::fromPrimal(
    const unsigned num_bodies,
    const Eigen::VectorXd& masses,
    const Eigen::VectorXd& f_in,
    const unsigned num_contacts,
    const Eigen::VectorXd& mu,
    const Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic,Eigen::ColMajor>& contact_bases,
    const Eigen::VectorXd& w_in,
    const Eigen::VectorXi& obj_a,
    const Eigen::VectorXi& obj_b,
    const Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic,Eigen::RowMajor>& HA,
    const Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic,Eigen::RowMajor>& HB )
{
  reset();

  // Copy M
  // We don't actually need it after having computed a factorization of M, but we keep it around
  // in case we want to use dumpToFile()
  assert( m_primal != nullptr );
  m_primal->M.reserve( num_bodies );
  m_primal->M.setRows( num_bodies, 6 );
  m_primal->M.setCols( num_bodies, 6 );
  for( unsigned bdy_idx = 0; bdy_idx < num_bodies; ++bdy_idx )
  {
    m_primal->M.insertBack( bdy_idx, bdy_idx ) = Eigen::MatrixXd::Map( &masses( 36 * bdy_idx ), 6, 6 );
  }
  m_primal->M.finalize();

  // E
  m_primal->E.reserve( num_contacts );
  m_primal->E.setRows( num_contacts );
  m_primal->E.setCols( num_contacts );
  for( unsigned cntct_idx = 0; cntct_idx < num_contacts; ++cntct_idx )
  {
    m_primal->E.insertBack( cntct_idx, cntct_idx ) = contact_bases.block<3,3>( 0, 3 * cntct_idx );
  }
  m_primal->E.finalize();
  m_primal->E.cacheTranspose();

  // Build H
  m_primal->H.reserve( 2 * num_contacts );
  m_primal->H.setRows( num_contacts );
  m_primal->H.setCols( num_bodies, 6 );
  #ifndef BOGUS_DONT_PARALLELIZE
  #pragma omp parallel for
  #endif
  for( unsigned cntct_idx = 0; cntct_idx < num_contacts; ++cntct_idx )
  {
    if( obj_a( cntct_idx ) >= 0 && obj_b( cntct_idx ) >= 0 )
    {
      m_primal->H.insert( cntct_idx, obj_a( cntct_idx ) ) =   HA.block<3,6>( 3 * cntct_idx, 0 );
      m_primal->H.insert( cntct_idx, obj_b( cntct_idx ) ) = - HB.block<3,6>( 3 * cntct_idx, 0 );
    }
    else if( obj_a( cntct_idx ) >= 0 && obj_b( cntct_idx ) == -1 )
    {
      m_primal->H.insert( cntct_idx, obj_a( cntct_idx ) ) = HA.block<3,6>( 3 * cntct_idx, 0 );
    }
    #ifndef NDEBUG
    else
    {
      std::cerr << "Error, impossible code path hit in Balls2DSobogus::fromPrimal. This is a bug." << std::endl;
      std::exit( EXIT_FAILURE );
    }
    #endif
  }
  m_primal->H.finalize();

  m_primal->f = f_in.data();
  m_primal->w = w_in.data();
  m_primal->mu = mu.data();
}
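For reference, the Eigen::MatrixXd::Map call above reads 36 consecutive doubles per body as a column-major 6x6 block, so the flat masses vector must be packed accordingly. A sketch under that assumption; packBodyMass is a hypothetical helper, not part of the original interface.

// Write body bdy_idx's 6x6 mass matrix into the flat, column-major storage
// layout expected by fromPrimal(). Sketch only.
void packBodyMass( Eigen::VectorXd& masses, const unsigned bdy_idx,
                   const Eigen::Matrix<double,6,6>& M_body )
{
  assert( masses.size() >= 36 * ( bdy_idx + 1 ) );
  Eigen::Map<Eigen::Matrix<double,6,6>>( masses.data() + 36 * bdy_idx ) = M_body;
}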
// Parse right hand side arguments for a matlab mex function.
//
// Inputs:
//   nrhs  number of right hand side arguments
//   prhs  pointer to right hand side arguments
// Outputs:
//   V  n by dim list of mesh vertex positions
//   F  m by dim list of mesh face indices
//   s  1 by dim bone source vertex position
//   d  1 by dim bone dest vertex position
// "Throws" matlab errors if dimensions are not sane.
void parse_rhs(
  const int nrhs, 
  const mxArray *prhs[], 
  Eigen::MatrixXd & V,
  Eigen::MatrixXi & F,
  Eigen::VectorXd & s,
  Eigen::VectorXd & d)
{
  using namespace std;
  if(nrhs < 4)
  {
    mexErrMsgTxt("nrhs < 4");
  }
  const int dim = mxGetN(prhs[0]);
  if(dim != 3)
  {
    mexErrMsgTxt("Mesh vertex list must be #V by 3 list of vertex positions");
  }
  if(dim != (int)mxGetN(prhs[1]))
  {
    mexErrMsgTxt("Mesh facet size must equal dimension");
  }
  if(dim != (int)mxGetN(prhs[2]))
  {
    mexErrMsgTxt("Source dim must equal vertex dimension");
  }
  if(dim != (int)mxGetN(prhs[3]))
  {
    mexErrMsgTxt("Dest dim must equal vertex dimension");
  }
  // set number of mesh vertices
  const int n = mxGetM(prhs[0]);
  // set vertex position pointers
  double * Vp = mxGetPr(prhs[0]);
  // set number of faces
  const int m = mxGetM(prhs[1]);
  // set face index list pointer
  double * Fp = mxGetPr(prhs[1]);
  // set source and dest pointers
  double * sp = mxGetPr(prhs[2]);
  double * dp = mxGetPr(prhs[3]);
  // resize output and copy vertex positions (both MATLAB and Eigen are column-major)
  V.resize(n,dim);
  copy(Vp,Vp+n*dim,V.data());
  // resize output and copy face indices
  F.resize(m,dim);
  // std::copy performs an implicit double -> int conversion here
  copy(Fp,Fp+m*dim,F.data());
  // shift from MATLAB's 1-based indexing to 0-based
  // http://stackoverflow.com/a/4461466/148668
  transform(F.data(),F.data()+m*dim,F.data(),
    [](const int f){ return f - 1; });
  // resize outputs and copy source and dest positions
  s.resize(dim);
  copy(sp,sp+dim,s.data());
  d.resize(dim);
  copy(dp,dp+dim,d.data());
}
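A sketch of a MEX gateway that could call parse_rhs above; the output handling shown here is illustrative and not taken from the original source.

// Hypothetical entry point: parse the mesh and bone endpoints, then return V
// as the only left-hand-side output.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
  Eigen::MatrixXd V;
  Eigen::MatrixXi F;
  Eigen::VectorXd s, d;
  parse_rhs(nrhs, prhs, V, F, s, d);

  if(nlhs > 0)
  {
    // Both MATLAB and Eigen::MatrixXd are column-major, so a flat copy suffices.
    plhs[0] = mxCreateDoubleMatrix(V.rows(), V.cols(), mxREAL);
    std::copy(V.data(), V.data() + V.size(), mxGetPr(plhs[0]));
  }
}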
void JointTrajGeneratorRML::computeTrajectory(
    const ros::Time rtt_now,
    const Eigen::VectorXd &init_position,
    const Eigen::VectorXd &init_velocity,
    const Eigen::VectorXd &init_acceleration,
    const ros::Duration duration,
    const Eigen::VectorXd &goal_position,
    const Eigen::VectorXd &goal_velocity,
    boost::shared_ptr<ReflexxesAPI> rml,
    boost::shared_ptr<RMLPositionInputParameters> rml_in,
    boost::shared_ptr<RMLPositionOutputParameters> rml_out,
    RMLPositionFlags &rml_flags) const
{
  // Update RML input parameters
  rml_in->SetMaxVelocityVector(&max_velocities_[0]);
  rml_in->SetMaxAccelerationVector(&max_accelerations_[0]);
  rml_in->SetMaxJerkVector(&max_jerks_[0]);

  // Set initial params
  for(size_t i=0;i<n_dof_;i++) {
    rml_in->SetSelectionVectorElement(true,i);
  }

  // Override initial state if necessary
  rml_in->SetCurrentPositionVector(init_position.data());
  rml_in->SetCurrentVelocityVector(init_velocity.data());
  rml_in->SetCurrentAccelerationVector(init_acceleration.data());

  // Set goal params
  rml_in->SetTargetPositionVector(goal_position.data());
  rml_in->SetTargetVelocityVector(goal_velocity.data());

  // Compute the trajectory
  if(verbose_) RTT::log(RTT::Debug) << ("RML Recomputing trajectory...") << RTT::endlog();

  // Set desired execution time for this trajectory (definitely > 0)
  rml_in->SetMinimumSynchronizationTime(std::max(0.0,duration.toSec()));

  // Verbose logging
  if(verbose_) RTT::log(RTT::Debug) << "RML IN: time: "<<rml_in->GetMinimumSynchronizationTime() << RTT::endlog();
  if(verbose_) RMLLog(RTT::Info, rml_in);

  // Compute trajectory
  int rml_result = rml->RMLPosition(
      *rml_in.get(),
      rml_out.get(),
      rml_flags);

  // Get expected time
  if(verbose_) RTT::log(RTT::Debug) << "RML OUT: time: "<<rml_out->GetGreatestExecutionTime() << RTT::endlog();

  // Throw exception on result
  this->handleRMLResult(rml_result);
}
Eigen::VectorXd TargetTrackingController::getControl(const EKF *ekf, const MultiAgentMotionModel *motionModel, const std::vector<const SensorModel*> &sensorModel, double *f) const {
  Evaluator evaluator(ekf, motionModel, sensorModel, params);

  Eigen::VectorXd p = Eigen::VectorXd::Zero(motionModel->getControlDim());
  Eigen::VectorXd lowerBound = Eigen::VectorXd::Constant(motionModel->getControlDim(), params("multi_rotor_control/controlMin").toDouble());
  Eigen::VectorXd upperBound = Eigen::VectorXd::Constant(motionModel->getControlDim(), params("multi_rotor_control/controlMax").toDouble());

  nlopt_opt opt = nlopt_create(NLOPT_LN_COBYLA, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LN_BOBYQA, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LN_NEWUOA_BOUND, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LN_PRAXIS, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LN_NELDERMEAD, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LN_SBPLX, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_GN_ORIG_DIRECT, p.size()); // failed
//  nlopt_opt opt = nlopt_create(NLOPT_GN_ORIG_DIRECT_L, p.size()); // very good: p    0.0118546 -6.27225e-05  6.27225e-05 -2.09075e-05  2.09075e-05 -8.51788e-06 -2.09075e-05           10
//  nlopt_opt opt = nlopt_create(NLOPT_GN_ISRES, p.size()); // rather bad
//  nlopt_opt opt = nlopt_create(NLOPT_GN_CRS2_LM, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LD_MMA, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LD_CCSAQ, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LD_SLSQP, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LD_LBFGS, p.size());
//  nlopt_opt opt = nlopt_create(NLOPT_LD_TNEWTON_PRECOND, p.size()); // bad
//  nlopt_opt opt = nlopt_create(NLOPT_LD_TNEWTON_PRECOND_RESTART, p.size()); // bad
//  nlopt_opt opt = nlopt_create(NLOPT_LD_VAR2, p.size());

  nlopt_set_min_objective(opt, f_evaluate, &evaluator);
  nlopt_set_lower_bounds(opt, lowerBound.data());
  nlopt_set_upper_bounds(opt, upperBound.data());
  nlopt_set_ftol_abs(opt, 1E-6);
  nlopt_set_xtol_rel(opt, 1E-3);
  nlopt_set_maxeval(opt, 1E8);
  nlopt_set_maxtime(opt, 7200);
  // Use a heap-allocated buffer for the optimizer (variable-length arrays are not standard C++).
  std::vector<double> pa(p.size());
  memcpy(pa.data(), p.data(), p.size()*sizeof(double));
  double cost = 0;
//  std::string tmp; std::cerr << "Press enter to start optimization\n"; std::getline(std::cin, tmp);
  nlopt_result ret = nlopt_optimize(opt, pa.data(), &cost);
  Eigen::VectorXd p_res = Eigen::Map<Eigen::VectorXd>(pa.data(), p.size());
  if (f)
    *f = cost;

  std::cerr << "\nInitial guess:\n";
  std::cerr << "  p " << p.transpose() << "\n";
  std::cerr << "  value " << evaluator.evaluate(p) << "\n";

  std::cerr << "Optimization result (return code " << ret << "):\n";
  std::cerr << "  p " << p_res.transpose() << "\n";
  std::cerr << "  value " << evaluator.evaluate(p_res) << "\n";
  nlopt_destroy(opt);
  return p_res;
}
Example #7
//==============================================================================
void Function::evalGradient(const Eigen::VectorXd& _x,
                            Eigen::Map<Eigen::VectorXd> _grad)
{
  // TODO(MXG): This is for backwards compatibility
  Eigen::Map<const Eigen::VectorXd> temp(_x.data(), _x.size());
  evalGradient(temp, _grad);
}
Example #8
//==============================================================================
double Function::eval(const Eigen::VectorXd& _x)
{
  // TODO(MXG): This is for backwards compatibility. This function should be
  // made pure abstract with the next major version-up
  Eigen::Map<const Eigen::VectorXd> temp(_x.data(), _x.size());
  return eval(temp);
}
bool AnchoredRectangleHandler::init(FactorGraphFilter* f, const string &name,
    const Eigen::VectorXd & T_OS, const Eigen::VectorXd & K) {

  _filter = f;
  _sensorName = name;

  // TODO: currently FHP works only if system is camera-centric, i.e., T_OS = I

  _filter->addConstantParameter("Camera_SOx", T_OS(0), true);
  _filter->addConstantParameter("Camera_SOy", T_OS(1), true);
  _filter->addConstantParameter("Camera_SOz", T_OS(2), true);

  _filter->addConstantParameter("Camera_qOSx", T_OS(4), true);
  _filter->addConstantParameter("Camera_qOSy", T_OS(5), true);
  _filter->addConstantParameter("Camera_qOSz", T_OS(6), true);

  _filter->addConstantParameter(Matrix3D, "Camera_CM", K, true);

  Eigen::Map<const Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > KasMatrix(
      K.data());
  _K = KasMatrix;

  _fx = _K(0, 0);
  _fy = _K(1, 1);

  return true;
}
Example #10
bool BlockSparseMatrix::ConjugateGradientSolve(const Eigen::VectorXd& rhs, Eigen::VectorXd& sol)
{
	//sol = mMatrix.llt().solve(rhs);
	// NOTE: this early return delegates to the LLT solver, so the PCG path
	// below is currently unreachable.
	return LltSolve(rhs, sol);

	MatrixSolver* solver = new PCGSolver();
	int vectorLength = static_cast<int>(rhs.size());
	double* result = new double[vectorLength];
	double* rhsData = new double[vectorLength];
	memcpy(rhsData, rhs.data(), vectorLength * sizeof(double));

	if (mbCSRDirty)
	{
		mCSREquivalent.ConvertFromBlockSparseMatrix(*this);
		mbCSRDirty = false;
	}

	solver->SetMatrix(&mCSREquivalent);
	solver->SetRHS(rhsData);
	solver->Solve(result, true);
	sol.resize(vectorLength);
	for (int i = 0; i < vectorLength; ++i)
		sol[i] = result[i];

	delete[] rhsData;
	delete[] result;
	delete solver;
	return true;
}
 bool ExpMapQuaternion::isInM_(const Eigen::VectorXd& val, const double& )
 {
   bool out(val.size()==4);
   double norm = toConstQuat(val.data()).norm();
   out = out && (fabs(norm - 1) < prec);
   return out;
 }
Example #12
/**
 * @function getNearestNeighbor
 */
int JG_RRT::getNearestNeighbor( const Eigen::VectorXd &qsample )
{
    struct kdres* result = kd_nearest( kdTree, qsample.data() );
    uintptr_t nearest = (uintptr_t) kd_res_item_data( result );
    kd_res_free( result );  // release the result set allocated by kd_nearest

    activeNode = nearest;
    return nearest;
}
Example #13
void save( Archive & ar,
           const Eigen::VectorXd & t,
           const unsigned int file_version )
{
    int n0 = t.size();
    ar << BOOST_SERIALIZATION_NVP( n0 );
    ar << boost::serialization::make_array( t.data(),
                                            t.size() );
}
void CodeAtlas::FuzzyDependBuilder::buildChildDepend( QMultiHash<QString, ChildPack>& childList , 
													 Eigen::SparseMatrix<double>& vtxEdgeMat,
													 Eigen::VectorXd&             edgeWeightVec,
													 QList<FuzzyDependAttr::DependPair>& dependPair)
{
	QStringList codeList;
	QVector<ChildPack*> childPackPtr;
	for (QMultiHash<QString, ChildPack>::Iterator pChild = childList.begin();
		pChild != childList.end(); ++pChild)
	{
		codeList.push_back(pChild.value().m_code);
		childPackPtr.push_back(&pChild.value());
	}

	std::vector<Triplet> tripletArray;
	QVector<double>		 edgeWeightArray;

	// add dependency edges between child nodes
	int ithSrc = 0;
	for (QMultiHash<QString, ChildPack>::Iterator pChild = childList.begin();
		pChild != childList.end(); ++pChild, ++ithSrc)
	{
		// for each child, find number of occurrences of this child's name
		ChildPack& srcChild = pChild.value();
		const QString& srcName = pChild.key();
		QVector<int> occurence;
		WordExtractor::findOccurrence(srcName, codeList, occurence);

		for (int ithTar = 0; ithTar < childPackPtr.size(); ++ithTar)
		{
			int nOccur = occurence[ithTar];
			if (nOccur == 0 || ithTar == ithSrc)
				continue;

			ChildPack& tarChild = *childPackPtr[ithTar];

			SymbolEdge::Ptr pEdge = SymbolTree::getOrAddEdge(srcChild.m_pNode, tarChild.m_pNode, SymbolEdge::EDGE_FUZZY_DEPEND);
			pEdge->clear();
			pEdge->strength() = nOccur;

			int nEdge = tripletArray.size()/2;
			tripletArray.push_back(Triplet(srcChild.m_index, nEdge ,1.0));
			tripletArray.push_back(Triplet(tarChild.m_index, nEdge ,-1.0));

			edgeWeightArray.push_back(nOccur);
			dependPair.push_back(FuzzyDependAttr::DependPair(srcChild.m_pNode, tarChild.m_pNode, nOccur));
		}
	}

	if (int nEdges = tripletArray.size()/2)
	{
		vtxEdgeMat.resize(childList.size(),nEdges);
		vtxEdgeMat.setFromTriplets(tripletArray.begin(), tripletArray.end());
		edgeWeightVec.resize(nEdges);
		memcpy(edgeWeightVec.data(), edgeWeightArray.data(), sizeof(double)* nEdges);
	}
}
Example #15
void load( Archive & ar,
           Eigen::VectorXd & t,
           const unsigned int file_version )
{
    int n0;
    ar >> BOOST_SERIALIZATION_NVP( n0 );
    t.resize( n0 );
    ar >> make_array( t.data(), t.size() );
}
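This load and the matching save in Example #13 follow Boost.Serialization's non-intrusive split-function pattern. A sketch of how the pair is typically wired up, assuming both overloads live in namespace boost::serialization; the archive type and file name here are arbitrary.

#include <fstream>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/split_free.hpp>

// Route serialize() calls for Eigen::VectorXd to the free save()/load()
// overloads defined above.
BOOST_SERIALIZATION_SPLIT_FREE(Eigen::VectorXd)

// Hypothetical round trip through a text archive.
void roundTrip( const Eigen::VectorXd & v_in, Eigen::VectorXd & v_out )
{
    {
        std::ofstream ofs("vector.txt");
        boost::archive::text_oarchive oa(ofs);
        oa << v_in;
    }
    std::ifstream ifs("vector.txt");
    boost::archive::text_iarchive ia(ifs);
    ia >> v_out;
}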
Example #16
    Eigen::VectorXd operator()(const F& obj, Eigen::VectorXd x) const{
      Eigen::VectorXd g, gnew, d, y, xold, s;
      Eigen::MatrixXd H, G;

      size_t n = x.size();

      lbfgs::minimizer<Real> minizer(n);
      lbfgs::traditional_convergence_test<Real> is_c(n);
      for(;;){
        Real f = obj(x);
        obj(x, g);
        if(minizer.run(x.data(), f, g.data())) continue;
        if(is_c(x.data(), g.data())) break;
        if(minizer.nfun() > 5000 * n) break;
      }

      return x;
    }
double f_evaluate(unsigned n, const double *x, double *grad, void *data) {
  TargetTrackingController::Evaluator *o = (TargetTrackingController::Evaluator*)data;
  Eigen::VectorXd p = Eigen::Map<const Eigen::VectorXd>(x, n);
  if (grad) {
    Eigen::VectorXd g = o->gradient(p);
    assert(g.size() == n);
    memcpy(grad, g.data(), n*sizeof(double));
  }
  return o->evaluate(p);
}
Example #18
//==============================================================================
void Function::evalGradient(const Eigen::VectorXd& _x,
                            Eigen::Map<Eigen::VectorXd> _grad)
{
  // TODO(MXG): This is for backwards compatibility. We suppress the
  // deprecated-warnings until then (see #544).
  Eigen::Map<const Eigen::VectorXd> temp(_x.data(), _x.size());

  DART_SUPPRESS_DEPRECATED_BEGIN
  evalGradient(temp, _grad);
  DART_SUPPRESS_DEPRECATED_END
}
 void OMPLRNStateSpace::OMPLToExoticaState(const ompl::base::State *state,
     Eigen::VectorXd &q) const
 {
   if (!state)
   {
     throw_pretty("Invalid state!");
   }
   if (q.rows() != (int) getDimension()) q.resize((int) getDimension());
   memcpy(q.data(),
       state->as<OMPLRNStateSpace::StateType>()->getRNSpace().values,
       sizeof(double) * q.rows());
 }
Example #20
	void KernelBand<NLSSystemObject>::Solve(Eigen::VectorXd& pi)
	{
		const double tstart = OpenSMOKE::OpenSMOKEGetCpuTime();

		J_factorized_->Solve(pi.data());

		const double tend = OpenSMOKE::OpenSMOKEGetCpuTime();

		numberOfLinearSystemSolutions_++;
		cpuTimeSingleLinearSystemSolution_ = tend - tstart;
		cpuTimeLinearSystemSolution_ += cpuTimeSingleLinearSystemSolution_;
	}
 void OMPLRNStateSpace::ExoticaToOMPLState(const Eigen::VectorXd &q,
     ompl::base::State *state) const
 {
   if (!state)
   {
     throw_pretty("Invalid state!");
   }
   if (q.rows() != (int) getDimension())
   {
     throw_pretty(
         "State vector ("<<q.rows()<<") and internal state ("<<(int)getDimension()<<") dimension disagree");
   }
   memcpy(state->as<OMPLRNStateSpace::StateType>()->getRNSpace().values,
       q.data(), sizeof(double) * q.rows());
 }
Example #22
/**
 * @function addNode
 */
int JG_RRT::addNode( const Eigen::VectorXd &qnew, int parent_ID)
{
    double goalDist = wsDistance( qnew );

    configVector.push_back( qnew );
    parentVector.push_back( parent_ID );
    goalDistVector.push_back( goalDist );

    uintptr_t ID = configVector.size() - 1;
    kd_insert( kdTree, qnew.data(), (void*)ID );
    add_Ranking( ID );

    activeNode = ID;
    return ID;
}
Example #23
/**
 * @function addNode
 */
int M_RRT::addNode( const Eigen::VectorXd &qnew, int parent_ID )
{
  double heuristic = heuristicCost( qnew );

  configVector.push_back( qnew );
  parentVector.push_back( parent_ID );
  heuristicVector.push_back( heuristic );  
  failureVector.push_back(0); 

  uintptr_t ID = configVector.size() - 1;
  kd_insert( kdTree, qnew.data(), (void*)ID );
  add_Ranking( ID );  

  activeNode = ID;
  return ID;
}
Example #24
/**
 * @function addNode
 * @brief Add _qnew to tree with parent _parentId
 * @return id of node added
 */
int B1RRT::addNode( const Eigen::VectorXd &_qnew, int _parentId )
{
    /// Update graph vectors: store the new configuration and its parent index
    configVector.push_back( _qnew );
    parentVector.push_back( _parentId );

    uintptr_t id = configVector.size() - 1;
    kd_insert( kdTree, _qnew.data(), (void*) id );  // store the node id as the kd-tree payload

    activeNode = id;

    /// Add node to ranking
    Eigen::VectorXd entry(3);
    entry << id, HeuristicCost( id, targetPose ), 0;
    pushRanking( entry );    

    return id;
}  
double RigidBodies3DSobogusInterface::evalInfNormError( const Eigen::VectorXd& r )
{
  if( !m_dual )
  {
    computeDual();
  }

  // Convert r to local coords
  assert( m_primal != nullptr );
  const Eigen::VectorXd r_loc{ m_primal->E.transpose() * r };

  // Setup GS parameters
  bogus::DualFrictionProblem<3u>::GaussSeidelType gs;
  gs.useInfinityNorm( true );
  assert( m_dual != nullptr );
  gs.setMatrix( m_dual->W );

  return m_dual->evalWith( gs, r_loc.data() );
}
Example #26
// Attempt G-H grid? http://dbarajassolano.wordpress.com/2012/01/26/on-sparse-grid-quadratures/
void ba81RefreshQuadrature(omxExpectation* oo)
{
	BA81Expect *state = (BA81Expect *) oo->argStruct;
	ba81NormalQuad &quad = state->getQuad();

	Eigen::VectorXd mean;
	Eigen::MatrixXd fullCov;
	state->getLatentDistribution(NULL, mean, fullCov);

	if (state->verbose >= 1) {
		mxLog("%s: refresh quadrature", oo->name);
		if (state->verbose >= 2) {
			int dim = mean.rows();
			pda(mean.data(), 1, dim);
			pda(fullCov.data(), dim, dim);
		}
	}

	quad.refresh(mean, fullCov);
}
Example #27
// IF THIS IS EVER TEMPLATED BE SURE THAT V IS COLMAJOR
IGL_INLINE void igl::winding_number(
    const Eigen::MatrixXd & V,
    const Eigen::MatrixXi & F,
    const Eigen::MatrixXd & O,
    Eigen::VectorXd & W)
{
    using namespace Eigen;
    // make room for output
    W.resize(O.rows(),1);
    switch(F.cols())
    {
    case 2:
        return winding_number_2(
                   V.data(),
                   V.rows(),
                   F.data(),
                   F.rows(),
                   O.data(),
                   O.rows(),
                   W.data());
    case 3:
    {
        WindingNumberAABB<Vector3d> hier(V,F);
        hier.grow();
        // loop over origins
        const int no = O.rows();
        #   pragma omp parallel for if (no>IGL_WINDING_NUMBER_OMP_MIN_VALUE)
        for(int o = 0; o<no; o++)
        {
            Vector3d p = O.row(o);
            W(o) = hier.winding_number(p);
        }
        break;
    }
    default:
        assert(false && "Bad simplex size");
        break;
    }
}
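A brief usage sketch for the routine above, assuming libigl's winding_number header is available; windingAtOrigin and the single query point are illustrative.

#include <igl/winding_number.h>
#include <Eigen/Dense>

// Evaluate the winding number of the triangle mesh (V,F) at the origin.
double windingAtOrigin(const Eigen::MatrixXd & V, const Eigen::MatrixXi & F)
{
    Eigen::MatrixXd O(1,3);
    O << 0.0, 0.0, 0.0;          // one query point per row
    Eigen::VectorXd W;
    igl::winding_number(V, F, O, W);
    return W(0);                 // ~1 inside a closed mesh, ~0 outside
}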
Example #28
Eigen::VectorXd BlockSparseMatrix::operator* (const Eigen::VectorXd& rhs) const
{
	CHECK(!mbCSRDirty);
	int vectorLength = static_cast<int>(rhs.size());
	double* rhsData = new double[vectorLength];
	double* result = new double[vectorLength];
	memcpy(rhsData, rhs.data(), vectorLength * sizeof(double));
	char trans = 'n';
	int numRows = mBlockHeight * mNumBlocksHeight;

#ifdef _MKL_IMPLEMENT
	mkl_dcsrgemv(&trans, &numRows, const_cast<double*>(mCSREquivalent.GetValueData()), const_cast<int*>(mCSREquivalent.GetRowId()), const_cast<int*>(mCSREquivalent.GetColumnId()), rhsData, result);
#else 
	CHECK(0) << "_MKL_IMPLEMENT not defined!";
#endif
	
	Eigen::VectorXd ret(vectorLength);
	for (int i = 0; i < vectorLength; ++i)
		ret[i] = result[i];
	delete[] rhsData;
	delete[] result;
	return ret;
}
Example #29
static int solveQP( const scalar& tol, MatrixXXsc& C, VectorXs& dvec, VectorXs& alpha )
{
  static_assert( std::is_same<scalar,double>::value, "QL only supports doubles." );

  assert( dvec.size() == alpha.size() );

  // All constraints are bound constraints.
  int m = 0;
  int me = 0;
  int mmax = 0;

  // C should be symmetric
  assert( ( C - C.transpose() ).lpNorm<Eigen::Infinity>() < 1.0e-14 );
  // Number of degrees of freedom.
  assert( C.rows() == C.cols() );
  int n{ int( C.rows() ) };
  int nmax = n;
  int mnn = m + n + n;
  assert( dvec.size() == nmax );

  // Impose non-negativity constraints on all variables
  Eigen::VectorXd xl = Eigen::VectorXd::Zero( nmax );
  Eigen::VectorXd xu = Eigen::VectorXd::Constant( nmax, std::numeric_limits<double>::infinity() );

  // u will contain the constraint multipliers
  Eigen::VectorXd u( mnn );

  // Status of the solve
  int ifail = -1;
  // Use the built-in cholesky decomposition
  int mode = 1;
  // Some fortran output stuff
  int iout = 0;
  // 1 => print output, 0 => silent
  int iprint = 1;

  // Working space
  assert( m == 0 && me == 0 && mmax == 0 );
  int lwar = 3 * ( nmax * nmax ) / 2 + 10 * nmax + 2;
  Eigen::VectorXd war( lwar );
  // Additional working space
  int liwar = n;
  Eigen::VectorXi iwar( liwar );

  {
    scalar tol_local = tol;
    ql_( &m,
        &me,
        &mmax,
        &n,
        &nmax,
        &mnn,
        C.data(),
        dvec.data(),
        nullptr,
        nullptr,
        xl.data(),
        xu.data(),
        alpha.data(),
        u.data(),
        &tol_local,
        &mode,
        &iout,
        &ifail,
        &iprint,
        war.data(),
        &lwar,
        iwar.data(),
        &liwar );
  }

  return ifail;
}
	auto biases(const Eigen::VectorXd& parameters, size_t i)
	{
		return Eigen::Map<Eigen::VectorXd>(const_cast<double*>(parameters.data() + bias_offsets[i]), size_layer(i + 1));
	}
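A hedged usage sketch of the accessor above: the returned Eigen::Map aliases the parameter storage, and the const_cast strips constness, so the underlying vector must actually be mutable for writes to be valid. resetFirstLayerBiases is a hypothetical companion member, not part of the original source.

	// Sketch only: zero-initialise the biases of the first layer in place.
	void resetFirstLayerBiases(Eigen::VectorXd& parameters)
	{
		biases(parameters, 0).setZero();   // writes through the map into `parameters`
	}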