Example #1
Datum blocker(const RealMatrix& Ua, const RealMatrix sa, const vGroup& ensemble, const uint blocksize, uint repeats, ExtractPolicy& policy) {

  TimeSeries<double> coverlaps;

  for (uint i=0; i<repeats; ++i) {
    vector<uint> picks = pickFrames(ensemble.size(), blocksize);
    
    if (debug) {
      cerr << "***Block " << blocksize << ", replica " << i << ", picks " << picks.size() << endl;
      dumpPicks(picks);
    }
    
    vGroup subset = subgroup(ensemble, picks);
    boost::tuple<RealMatrix, RealMatrix> pca_result = pca(subset, policy);
    RealMatrix s = boost::get<0>(pca_result);
    RealMatrix U = boost::get<1>(pca_result);

    if (length_normalize)
      for (uint j=0; j<s.rows(); ++j)
        s[j] /= blocksize;

    coverlaps.push_back(covarianceOverlap(sa, Ua, s, U));
  }

  return( Datum(coverlaps.average(), coverlaps.variance(), coverlaps.size()) );

}
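
blocker() above draws repeats random frame subsets of size blocksize, runs a PCA on each, and reduces the per-block covariance overlaps to a mean, a variance, and a sample count. A minimal standalone sketch of that reduction, with no LOOS types and a hypothetical trialValue callable standing in for one block's covariance overlap:

#include <cstddef>
#include <vector>

struct BlockStats { double mean; double variance; std::size_t n; };

template <class Trial>
BlockStats blockAverage(std::size_t repeats, Trial trialValue) {
  std::vector<double> samples;
  samples.reserve(repeats);
  for (std::size_t i = 0; i < repeats; ++i)
    samples.push_back(trialValue());              // one random-subset trial

  double sum = 0.0;
  for (double x : samples) sum += x;
  const double mean = sum / samples.size();

  double ss = 0.0;
  for (double x : samples) ss += (x - mean) * (x - mean);
  const double variance = ss / samples.size();    // population variance; TimeSeries::variance() may use another convention

  return BlockStats{mean, variance, samples.size()};
}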
// subtract the mean row (the column-wise mean) from each row of the matrix
void ZeroSum(RealMatrix& mat)
{
	RealVector sum(mat.size2(), 0.0);
	for (size_t j=0; j<mat.size1(); j++) sum += row(mat, j);
	RealVector mean = (1.0 / mat.size1()) * sum;
	for (size_t j=0; j<mat.size1(); j++) row(mat, j) -= mean;
}
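
For comparison, a sketch of the same centering written against a plain row-major std::vector matrix instead of a uBLAS RealMatrix (rows and cols passed explicitly):

#include <cstddef>
#include <vector>

void zeroSum(std::vector<double>& mat, std::size_t rows, std::size_t cols)
{
	std::vector<double> mean(cols, 0.0);
	for (std::size_t r = 0; r < rows; ++r)
		for (std::size_t c = 0; c < cols; ++c)
			mean[c] += mat[r * cols + c];
	for (std::size_t c = 0; c < cols; ++c)
		mean[c] /= static_cast<double>(rows);
	for (std::size_t r = 0; r < rows; ++r)
		for (std::size_t c = 0; c < cols; ++c)
			mat[r * cols + c] -= mean[c];   // every column now has zero mean
}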
Example #3
		// adds a constant c to every component of the input
		void eval(RealMatrix const& patterns, RealMatrix& output, State& state)const
		{
			output.resize(patterns.size1(),m_dim);
			for(std::size_t  p = 0; p != patterns.size1();++p){
				for (size_t i=0;i!=m_dim;++i)
					output(p,i)=patterns(p,i)+m_c;
			}
		}
Example #4
void OnlineRNNet::weightedParameterDerivative(RealMatrix const& pattern, const RealMatrix& coefficients,  RealVector& gradient) {
    SIZE_CHECK(pattern.size1()==1);//we can only process a single input at a time.
    SIZE_CHECK(coefficients.size1()==1);
    SIZE_CHECK(pattern.size2() == inputSize());
    SIZE_CHECK(pattern.size2() == coefficients.size2());
    gradient.resize(mpe_structure->parameters());

    std::size_t numNeurons = mpe_structure->numberOfNeurons();
    std::size_t numUnits = mpe_structure->numberOfUnits();

    //first check whether this is the first call of the derivative after a change of internal structure. In this case we have to allocate A LOT
    //of memory for the derivative and set it to zero.
    if(m_unitGradient.size1() != mpe_structure->parameters() || m_unitGradient.size2() != numNeurons) {
        m_unitGradient.resize(mpe_structure->parameters(),numNeurons);
        m_unitGradient.clear();
    }

    //for the next steps see Kenji Doya, "Recurrent Networks: Learning Algorithms"

    //calculate the derivative for all neurons f'
    RealVector neuronDerivatives(numNeurons);
    for(std::size_t i=0; i!=numNeurons; ++i) {
        neuronDerivatives(i)=mpe_structure->neuronDerivative(m_activation(i+inputSize()+1));
    }

    //calculate the derivative for every weight using the derivative of the last time step
    auto hiddenWeights = columns(
                             mpe_structure->weights(),
                             inputSize()+1,numUnits
                         );

    //update the new gradient with the effect of last timestep
    noalias(m_unitGradient) = prod(m_unitGradient,trans(hiddenWeights));

    //add the effect of the current time step
    std::size_t param = 0;
    for(std::size_t i = 0; i != numNeurons; ++i) {
        for(std::size_t j = 0; j != numUnits; ++j) {
            if(mpe_structure->connection(i,j)) {
                m_unitGradient(param,i) += m_lastActivation(j);
                ++param;
            }
        }
    }

    //multiply with outer derivative of the neurons
    for(std::size_t i = 0; i != m_unitGradient.size1(); ++i) {
        noalias(row(m_unitGradient,i)) = element_prod(row(m_unitGradient,i),neuronDerivatives);
    }
    //and formula 4 (the gradient itself)
    noalias(gradient) = prod(
                            columns(m_unitGradient,numNeurons-outputSize(),numNeurons),
                            row(coefficients,0)
                        );
    //sanity check
    SIZE_CHECK(param == mpe_structure->parameters());
}
void LinearFBSystem::SetResponseMatrix (const RealMatrix& M)
{
	assert(signals.Size()==M.nrows() && actuators.Size()==M.ncols());
	if(Mi!=0)
	{
		delete Mi;
	}
	Mi = new SVDMatrix<double>(M);
}
	double evalDerivative(
		RealMatrix const&, 
		RealMatrix const& prediction, 
		RealMatrix& gradient
	) const {
		gradient.resize(prediction.size1(),prediction.size2());
		gradient.clear();
		return 0;
	}
inline void copy(const std::vector<Real>& vector, RealMatrix& realmatrix)
{
  cf3_assert(realmatrix.size() == vector.size());
  for (Uint row=0; row<realmatrix.rows(); ++row)
  {
    for (Uint col=0; col<realmatrix.cols(); ++col)
    {
      realmatrix(row,col) = vector[realmatrix.cols()*row + col];
    }
  }
}
Example #8
RealMatrix::RealMatrix(const RealMatrix& A)
{
    // note: do not delete[] entries here -- in a constructor the pointer is
    // still uninitialized, so deleting it would be undefined behaviour
    m = A.rows();
    n = A.columns();
    entries = new double[m * n];
    
    for(int i=0; i<m; ++i)
        for(int j=0; j<n; ++j)
            this->operator()(i,j) = A(i,j);
}
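
A deep-copying copy constructor like the one above normally travels with a matching destructor and copy assignment operator (rule of three). A minimal sketch with a hypothetical Matrix class, showing where a delete[] actually is safe:

#include <algorithm>
#include <cstddef>

class Matrix {
public:
    Matrix(std::size_t rows, std::size_t cols)
        : m(rows), n(cols), entries(new double[rows * cols]()) {}
    Matrix(const Matrix& A)
        : m(A.m), n(A.n), entries(new double[A.m * A.n]) {
        std::copy(A.entries, A.entries + m * n, entries);
    }
    Matrix& operator=(const Matrix& A) {
        if (this != &A) {
            double* fresh = new double[A.m * A.n];
            std::copy(A.entries, A.entries + A.m * A.n, fresh);
            delete[] entries;                 // safe: entries is a valid allocation here
            entries = fresh;
            m = A.m; n = A.n;
        }
        return *this;
    }
    ~Matrix() { delete[] entries; }
private:
    std::size_t m, n;
    double* entries;
};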
void DiscreteLoss::defineCostMatrix(RealMatrix const& cost){
	// check validity
	std::size_t size = cost.size1();
	SHARK_ASSERT(cost.size2() == size);
	for (std::size_t i = 0; i != size; i++){
		for (std::size_t j = 0; j != size; j++){
			SHARK_ASSERT(cost(i, j) >= 0.0);
		}
		SHARK_ASSERT(cost(i, i) == 0.0);
	}
	m_cost = cost;
}
Example #10
void CElements::put_coordinates(RealMatrix& elem_coords, const Uint elem_idx) const
{
  const CTable<Real>& coords_table = nodes().coordinates();
  CConnectivity::ConstRow elem_nodes = node_connectivity()[elem_idx];

  const Uint nb_nodes=elem_coords.rows();
  const Uint dim=elem_coords.cols();


  for(Uint node = 0; node != nb_nodes; ++node)
    for (Uint d=0; d<dim; ++d)
      elem_coords(node,d) = coords_table[elem_nodes[node]][d];
}
void AOIntegrals::transformAORII(){
  if(!this->haveRIS) this->computeAORIS();
  if(!this->haveRII) this->computeAORII();

  RealMap S(&this->aoRIS_->storage()[0],this->DFbasisSet_->nBasis(),this->DFbasisSet_->nBasis());
  RealMatrix ShalfMat = S.pow(-0.5);
  RealTensor2d Shalf(this->DFbasisSet_->nBasis(),this->DFbasisSet_->nBasis());
  for(auto i = 0; i < Shalf.size(); i++)
    Shalf.storage()[i] = ShalfMat.data()[i];
  RealTensor3d A0(this->nBasis_,this->nBasis_,this->DFbasisSet_->nBasis());
  enum{i,j,k,l,X,Y};
  contract(1.0,*this->aoRII_,{i,j,X},Shalf,{X,Y},0.0,A0,{i,j,Y});
  *this->aoRII_ = A0;
  this->haveTRII = true;
}
Example #12
void CMACMap::eval(RealMatrix const& patterns,RealMatrix &output) const {
    SIZE_CHECK(patterns.size2() == m_inputSize);
    std::size_t numPatterns = patterns.size1();
    output.resize(numPatterns,m_outputSize);
    output.clear();

    for(std::size_t i = 0; i != numPatterns; ++i) {
        std::vector<std::size_t> indizes = getIndizes(row(patterns,i));
        for (std::size_t o=0; o!=m_outputSize; ++o) {
            for (std::size_t j = 0; j != m_tilings; ++j) {
                output(i,o) += m_parameters(indizes[j] + o*m_parametersPerTiling);
            }
        }
    }
}
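
Each CMAC output is the sum of exactly one parameter per tiling, selected by the precomputed tile indices. A standalone sketch of that lookup for a single pattern and a single output, using a plain std::vector parameter store (all names hypothetical):

#include <cstddef>
#include <vector>

double cmacOutput(const std::vector<double>& parameters,
                  const std::vector<std::size_t>& tileIndices,   // one index per tiling
                  std::size_t output, std::size_t parametersPerTiling) {
    double value = 0.0;
    for (std::size_t j = 0; j != tileIndices.size(); ++j)
        value += parameters[tileIndices[j] + output * parametersPerTiling];
    return value;
}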
Example #13
inline Rcpp::List
RipsDiagArbitDionysusPhat(const RealMatrix  & X
                        , const int           maxdimension
                        , const double        maxscale
                        , const std::string & library
                        , const bool          location
                        , const bool          printProgress
	) {
	std::vector< std::vector< std::vector< double > > > persDgm;
	std::vector< std::vector< std::vector< unsigned > > > persLoc;
	std::vector< std::vector< std::vector< std::vector< unsigned > > > > persCycle;

	PointContainer points = RcppToStl< PointContainer >(X, true);
	//read_points2(infilename, points);

	PairDistancesA           distances(points);
	GeneratorA               rips(distances);
	GeneratorA::Evaluator    size(distances);
	FltrRA                   f;

	// Generate the (maxdimension + 1)-skeleton of the Rips complex up to maxscale
	rips.generate(maxdimension + 1, maxscale, make_push_back_functor(f));

	if (printProgress) {
		Rprintf("# Generated complex of size: %d \n", f.size());
	}

	// Sort the simplices with respect to distance 
	f.sort(GeneratorA::Comparison(distances));

	// Compute the persistence diagram of the complex
	if (library[0] == 'D') {
		computePersistenceDionysus< PersistenceR >(f, size, maxdimension,
				Rcpp::NumericVector(X.nrow()), location, printProgress,
				persDgm, persLoc, persCycle);
	}
	if (library[0] == 'P') {
		computePersistencePhat(f, size, maxdimension,
				Rcpp::NumericVector(X.nrow()), location, printProgress,
				persDgm, persLoc);
	}

	// Output persistence diagram
	return Rcpp::List::create(
			concatStlToRcpp< Rcpp::NumericMatrix >(persDgm, true, 3),
			concatStlToRcpp< Rcpp::NumericMatrix >(persLoc, false, 2),
			StlToRcppMatrixList< Rcpp::List, Rcpp::NumericMatrix >(persCycle));
}
Example #14
void CCellFaces::put_coordinates(RealMatrix& elem_coords, const Uint face_idx) const
{
  const CTable<Real>& coords_table = nodes().coordinates();
  std::vector<Uint> face_nodes = m_cell_connectivity->face_nodes(face_idx);

  const Uint nb_nodes=elem_coords.rows();
  const Uint dim=elem_coords.cols();

  cf_assert(nb_nodes == face_nodes.size());
  cf_assert(dim==coords_table.row_size());

  for(Uint node = 0; node != nb_nodes; ++node)
    for (Uint d=0; d<dim; ++d)
      elem_coords(node,d) = coords_table[face_nodes[node]][d];

}
    void SVMLightDataModel::save (std::string filePath) {
        std::cout << "Saving model to " << filePath << std::endl;

        // create new datastream
        std::ofstream ofs;
        ofs.open (filePath.c_str());

        if (!ofs)
            throw (SHARKSVMEXCEPTION ("[export_SVMLight] file can not be opened for writing"));


        // create header first
        saveHeader (ofs);
        RealMatrix preparedAlphas = container -> m_alphas;

        // prepare for binary case

        switch (container -> m_svmType) {
            case SVMTypes::CSVC: {
                // nothing to prepare for CSVC
                break;
            }

            case SVMTypes::Pegasos: {
                // FIXME: multiclass
                // throw away the alphas we do not need
                preparedAlphas.resize (container -> m_alphas.size1(), 1);

                // FIXME: better copy...
                for (size_t j = 0; j < container -> m_alphas.size1(); ++j) {
                    preparedAlphas (j, 0) = container -> m_alphas (j, 1);
                }

                break;
            }

            default: {
                throw (SHARKSVMEXCEPTION ("[SVMLightDataModel::save] Unsupported SVM type."));
            }
        }


        // then save data in SVMLight format
        container -> saveSparseLabelAndData (ofs);

        ofs.close();
    }
Example #16
void CSpace::put_coordinates(RealMatrix& coordinates, const Uint elem_idx) const
{
    if (bound_fields().has_coordinates())
    {
        CConnectivity::ConstRow indexes = indexes_for_element(elem_idx);
        Field& coordinates_field = bound_fields().coordinates();

        cf_assert(coordinates.rows() == indexes.size());
        cf_assert(coordinates.cols() == coordinates_field.row_size());

        for (Uint i=0; i<coordinates.rows(); ++i)
        {
            for (Uint j=0; j<coordinates.cols(); ++j)
            {
                coordinates(i,j) = coordinates_field[i][j];
            }
        }
    }
    else
    {
        const ShapeFunction& space_sf       = shape_function();
        const CEntities&     geometry       = support();
        const ElementType&   geometry_etype = element_type();
        const ShapeFunction& geometry_sf    = geometry_etype.shape_function();
        RealMatrix geometry_coordinates = geometry.get_coordinates(elem_idx);

        cf_assert(coordinates.rows() == space_sf.nb_nodes());
        cf_assert(coordinates.cols() == geometry_etype.dimension());
        for (Uint node=0; node<space_sf.nb_nodes(); ++node)
        {
            coordinates.row(node) = geometry_sf.value( space_sf.local_coordinates().row(node) ) * geometry_coordinates;
        }
    }
}
void
AccpmLALinSolve(const RealMatrix &A, bool cholesky, RealMatrix &X, const RealMatrix &B)
{
  if (!cholesky) {
    AccpmLALinearSolve(A, X, B);
    return;
  }
  char uplo = 'L';
  long int n = A.size(0);
  long int lda = A.inc(0) * A.gdim(0);
  long int nrhs = B.size(1);
  long int ldb = B.inc(0) * B.gdim(0);
  long int info;
  RealMatrix L(A);

  X.inject(B);
  //double alpha = 1;
  //F77NAME(dtrsm)("L","U","T","N",&n,&nrhs,&alpha,&A(0,0),&n,&X(0,0),&n);
  //F77NAME(dtrsm)("L","U","N","N",&n,&nrhs,&alpha,&A(0,0),&n,&X(0,0),&n);
 
  //F77NAME(dpotrs)(&uplo, &n, &nrhs, (doublereal *)&L(0,0), &lda, &X(0,0), &ldb, &info);
  F77NAME(dpotrs)(&uplo, &n, &nrhs, &L(0,0), &lda, &X(0,0), &ldb, &info);
  if (info < 0) {
    std::cerr << "AccpmLALinSolve: argument " << -info << " is invalid" 
  	      << std::endl;
  }
  
}
Example #18
	TestFunction(bool noisy):A(3,3),m_noisy(noisy)
	{
		A.clear();
		A(0,0)=20;
		A(1,1)=10;
		A(2,2)=5;

		m_features|=Base::HAS_FIRST_DERIVATIVE;
	}
Example #19
		virtual void weightedParameterDerivative( RealMatrix const& input, RealMatrix const& coefficients, State const& state, RealVector& derivative)const
		{
			derivative.resize(1);
			derivative(0)=0;
			for (size_t p = 0; p < coefficients.size1(); p++)
			{
				derivative(0) +=sum(row(coefficients,p));
			}
		}
int
AccpmLACholeskyFactor(const RealMatrix &A, RealMatrix &L)
{
  L.copy(A);

  char uplo = 'L';
  long int n = A.size(0);
  long int lda = A.inc(0) * A.gdim(0);
  long int info;
  F77NAME(dpotrf) (&uplo, &n, &L(0,0), &lda, &info);
  if (info > 0) {
    std::cerr << "AccpmLACholeskyFactor: Matrix is not Symmetric-positive-definite." 
  	      << std::endl;
  } else if (info < 0) {
    std::cerr << "AccpmLACholeskyFactor: argument " << -info << " is invalid" 
  	      << std::endl;
  }
  return info;
}
Example #21
//! Returns a model mapping encoded data from the
//! m-dimensional PCA coordinate system back to the
//! n-dimensional original coordinate system.
void PCA::decoder(LinearModel<>& model, std::size_t m) {
	if(!m) m = std::min(m_n,m_l);
	if( m == m_n && !m_whitening){
		model.setStructure(m_eigenvectors, m_mean);
		return;
	}
	RealMatrix A = columns(m_eigenvectors, 0, m);
	if(m_whitening){
		for(std::size_t i=0; i<A.size2(); i++) {
			//take care of numerical difficulties for very small eigenvalues.
			if(m_eigenvalues(i)/m_eigenvalues(0) < 1.e-15){
				column(A,i).clear();		
			}
			else{
				column(A, i) = column(A, i) * std::sqrt(m_eigenvalues(i));
			}
		}
	}

	model.setStructure(A, m_mean);
}
Example #22
//! Returns a model mapping the original data to the
//! m-dimensional PCA coordinate system.
void PCA::encoder(LinearModel<>& model, std::size_t m) {
	if(!m) m = std::min(m_n,m_l);
	
	RealMatrix A = trans(columns(m_eigenvectors, 0, m) );
	RealVector offset = -prod(A, m_mean);
	if(m_whitening){
		for(std::size_t i=0; i<A.size1(); i++) {
			//take care of numerical difficulties for very small eigenvalues.
			if(m_eigenvalues(i)/m_eigenvalues(0) < 1.e-15){
				row(A,i).clear();
				offset(i) = 0;			
			}
			else{
				row(A, i) /= std::sqrt(m_eigenvalues(i));
				offset(i) /= std::sqrt(m_eigenvalues(i));
			}
		}
	}
	model.setStructure(A, offset);
}
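
The whitening branch above divides each projection direction by the square root of its eigenvalue so the encoded components have unit variance, and zeroes directions whose eigenvalues are negligible relative to the largest one. The same scaling on a plain nested std::vector, as a sketch:

#include <cmath>
#include <cstddef>
#include <vector>

void whitenRows(std::vector<std::vector<double>>& A,
                const std::vector<double>& eigenvalues) {
	for (std::size_t i = 0; i < A.size(); i++) {
		if (eigenvalues[i] / eigenvalues[0] < 1.e-15) {
			for (double& x : A[i]) x = 0.0;        // numerically negligible direction
		}
		else {
			const double s = std::sqrt(eigenvalues[i]);
			for (double& x : A[i]) x /= s;         // unit variance along this direction
		}
	}
}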
Example #23
void CMACMap::weightedParameterDerivative(
    RealMatrix const& patterns,
    RealMatrix const& coefficients,
    State const&,//not needed
    RealVector &gradient
) const {
    SIZE_CHECK(patterns.size2() == m_inputSize);
    SIZE_CHECK(coefficients.size2() == m_outputSize);
    SIZE_CHECK(coefficients.size1() == patterns.size1());
    std::size_t numPatterns = patterns.size1();
    gradient.resize(m_parameters.size());
    gradient.clear();
    for(std::size_t i = 0; i != numPatterns; ++i) {
        std::vector<std::size_t> indizes = getIndizes(row(patterns,i));
        for (std::size_t o=0; o!=m_outputSize; ++o) {
            for (std::size_t j=0; j != m_tilings; ++j) {
                gradient(indizes[j] + o*m_parametersPerTiling) += coefficients(i,o);
            }
        }
    }
}
Example #24
void Reconstruct::configure_from_to()
{
  std::vector<std::string> from_to; option("from_to").put_value(from_to);

  if (is_not_null(m_from))
    remove_component(*m_from);
  m_from = create_component("from_"+from_to[0],from_to[0]).as_ptr<ShapeFunction>();
  if (is_not_null(m_to))
    remove_component(*m_to);
  m_to   = create_component("to_"+from_to[1],from_to[1]).as_ptr<ShapeFunction>();

  m_value_reconstruction_matrix.resize(m_to->nb_nodes(),m_from->nb_nodes());
  m_gradient_reconstruction_matrix.resize(m_to->dimensionality());
  for (Uint d=0; d<m_gradient_reconstruction_matrix.size(); ++d)
    m_gradient_reconstruction_matrix[d].resize(m_to->nb_nodes(),m_from->nb_nodes());
  for (Uint to_node=0; to_node<m_to->nb_nodes(); ++to_node)
  {
    m_value_reconstruction_matrix.row(to_node) = m_from->value( m_to->local_coordinates().row(to_node) );
    RealMatrix grad = m_from->gradient( m_to->local_coordinates().row(to_node) );
    for (Uint d=0; d<m_to->dimensionality(); ++d)
      m_gradient_reconstruction_matrix[d].row(to_node) = grad.row(d);
  }
}
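
configure_from_to() assembles interpolation matrices by evaluating the "from" shape functions at the "to" nodes. Applying such a value-reconstruction matrix is then just a matrix-vector product; a sketch with std::vector, independent of the CF component types:

#include <cstddef>
#include <vector>

// R has one row per "to" node and one column per "from" node.
std::vector<double> reconstruct(const std::vector<std::vector<double>>& R,
                                const std::vector<double>& from_values)
{
  std::vector<double> to_values(R.size(), 0.0);
  for (std::size_t i = 0; i < R.size(); ++i)
    for (std::size_t j = 0; j < from_values.size(); ++j)
      to_values[i] += R[i][j] * from_values[j];
  return to_values;
}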
Example #25
void OnlineRNNet::eval(RealMatrix const& pattern, RealMatrix& output){
	SIZE_CHECK(pattern.size1()==1);//we can only process a single input at a time.
	SIZE_CHECK(pattern.size2() == inputSize());
	
	std::size_t numUnits = mpe_structure->numberOfUnits();
	
	if(m_lastActivation.size() != numUnits){
		m_activation.resize(numUnits);
		m_lastActivation.resize(numUnits);
		zero(m_activation);
		zero(m_lastActivation);
	}
	swap(m_lastActivation,m_activation);

	//we want to treat input and bias neurons exactly as hidden or output neurons, so we copy the current
	//pattern to the beginning of the last activation pattern and set the bias neuron to 1,
	//so m_lastActivation has the format (input|1|lastNeuronActivation)
	noalias(subrange(m_lastActivation,0,mpe_structure->inputs())) = row(pattern,0);
	m_lastActivation(mpe_structure->bias())=1;
	m_activation(mpe_structure->bias())=1;

	//activation of the hidden neurons is now just a matrix vector multiplication

	fast_prod(
		mpe_structure->weights(),
		m_lastActivation,
		subrange(m_activation,inputSize()+1,numUnits)
	);

	//now apply the sigmoid function
	for (std::size_t i = inputSize()+1;i != numUnits;i++){
		m_activation(i) = mpe_structure->neuron(m_activation(i));
	}
	//copy the result to the output
	output.resize(1,outputSize());
	noalias(row(output,0)) = subrange(m_activation,numUnits-outputSize(),numUnits);
}
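
The comments above describe the activation vector layout as (input | 1 | neuron activations). A small standalone sketch that builds a vector with that layout, just to make the indexing explicit:

#include <cstddef>
#include <vector>

std::vector<double> makeActivation(const std::vector<double>& input,
                                   const std::vector<double>& neurons) {
	std::vector<double> act;
	act.reserve(input.size() + 1 + neurons.size());
	act.insert(act.end(), input.begin(), input.end());     // inputs first
	act.push_back(1.0);                                    // bias unit
	act.insert(act.end(), neurons.begin(), neurons.end()); // hidden and output activations
	return act;
}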
RealMatrix NormalizeComponentsWhitening::createWhiteningMatrix(
	RealMatrix& covariance
){
	SIZE_CHECK(covariance.size1() == covariance.size2());
	std::size_t m = covariance.size1();
	//we use the inverse Cholesky decomposition for whitening
	//since we have to assume that covariance does not have full rank, we use
	//the generalized decomposition
	RealMatrix whiteningMatrix(m,m,0.0);

	//do a pivoting cholesky decomposition
	//this destroys the covariance matrix, as it is not needed anymore afterwards.
	PermutationMatrix permutation(m);
	std::size_t rank = pivotingCholeskyDecompositionInPlace(covariance,permutation);
	//only take the nonzero columns as C
	auto C = columns(covariance,0,rank);

	//full rank means that we can use the typical Cholesky inverse with pivoting
	//so U is P C^-1 P^T
	if(rank == m){
		noalias(whiteningMatrix) = identity_matrix<double>( m );
		solveTriangularSystemInPlace<SolveXAB,upper>(trans(C),whiteningMatrix);
		swap_full_inverted(permutation,whiteningMatrix);
		return whiteningMatrix;
	}
	//complex case. 
	//A' = P C(C^TC)^-1(C^TC)^-1 C^T P^T
	//=> P^T U P = C(C^TC)^-1
	//<=> P^T U P (C^TC) = C
	RealMatrix CTC = prod(trans(C),C);

	auto submat = columns(whiteningMatrix,0,rank);
	solveSymmPosDefSystem<SolveXAB>(CTC,submat,C);
	swap_full_inverted(permutation,whiteningMatrix);

	return whiteningMatrix;
}
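
The comments above derive the whitening matrix from a (pivoting) Cholesky factorization of the covariance. A tiny numeric check of the full-rank property the code relies on, namely that W = (C^T)^{-1} with Sigma = C C^T gives W^T Sigma W = I, on a made-up 2x2 covariance (plain doubles, no uBLAS, and no pivoting or rank-deficient case):

#include <cmath>
#include <cstdio>

int main() {
	// covariance Sigma = [[4, 2], [2, 3]] (made-up, full-rank example)
	const double S[2][2] = {{4.0, 2.0}, {2.0, 3.0}};
	// lower Cholesky factor C with Sigma = C * C^T
	const double c00 = std::sqrt(S[0][0]);
	const double c10 = S[1][0] / c00;
	const double c11 = std::sqrt(S[1][1] - c10 * c10);
	// whitening matrix W = (C^T)^{-1}, an upper triangular 2x2
	const double W[2][2] = {{1.0 / c00, -c10 / (c00 * c11)},
	                        {0.0,        1.0 / c11}};
	// check that W^T * Sigma * W is the identity
	double SW[2][2] = {{0.0}}, R[2][2] = {{0.0}};
	for (int i = 0; i < 2; ++i)
		for (int j = 0; j < 2; ++j)
			for (int k = 0; k < 2; ++k)
				SW[i][j] += S[i][k] * W[k][j];
	for (int i = 0; i < 2; ++i)
		for (int j = 0; j < 2; ++j)
			for (int k = 0; k < 2; ++k)
				R[i][j] += W[k][i] * SW[k][j];   // W^T on the left
	std::printf("%g %g\n%g %g\n", R[0][0], R[0][1], R[1][0], R[1][1]);   // ~ 1 0 / 0 1
	return 0;
}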
int
AccpmLASymmLinSolve(SymmetricMatrix &A, RealMatrix &X, const RealMatrix &B)
{
  LaLowerTriangMatDouble L(A);
  char uplo = 'L';
  long int n = A.size(0);
  long int lda = A.inc(0) * A.gdim(0);
  long int nrhs = B.size(1);
  long int ldb = B.inc(0) * B.gdim(0);
  long int info;
  
  X.inject(B);

  F77NAME(dposv) (&uplo, &n, &nrhs, &L(0,0), &lda, &X(0,0), &ldb, &info);
 
  if (info > 0) {
    std::cerr << "AccpmLASymmLinSolve: Symmetric Matrix is not positive-definite." 
	      << std::endl;
  } else if (info < 0) {
    std::cerr << "AccpmLASymmLinSolve: argument " << -info << " is invalid" 
	      << std::endl;
  }
  return info;
}
Example #28
vector<double> calculatePercentageContacts(const RealMatrix& M) {
  vector<long> sum(M.cols()-1, 0);

  for (uint i=1; i<M.cols(); ++i)
    for (uint j=0; j<M.rows(); ++j)
      sum[i-1] += M(j, i);
  

  vector<double> occ(M.cols()-1, 0.0);
  for (uint i=0; i<M.cols()-1; ++i)
    occ[i] = static_cast<double>(sum[i]) / M.rows();

  return(occ);
}
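
calculatePercentageContacts() sums every column of the contact matrix except column 0 (assumed here to be an index/time column) and divides by the number of rows. The same computation on a row-major std::vector, as a sketch:

#include <cstddef>
#include <vector>

std::vector<double> percentageContacts(const std::vector<long>& M,
                                       std::size_t rows, std::size_t cols) {
  std::vector<double> occ(cols - 1, 0.0);
  for (std::size_t c = 1; c < cols; ++c) {
    long sum = 0;
    for (std::size_t r = 0; r < rows; ++r)
      sum += M[r * cols + c];
    occ[c - 1] = static_cast<double>(sum) / static_cast<double>(rows);
  }
  return occ;
}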
Example #29
void SNLLBase::
copy_con_grad(const RealMatrix& local_fn_grads, RealMatrix& grad_g,
              const size_t& offset)
{
  // Unlike DAKOTA, OPT++ expects nonlinear equations followed by nonlinear
  // inequalities.  Therefore, we have to reorder the constraint gradients.

  // Assign the gradients of the nonlinear constraints.  Let g: R^n -> R^m.
  // The gradient of g, grad_g, is the n x m matrix whose ith column is the
  // gradient of g_i.  The transpose of grad_g is called the Jacobian of g.
  // WJB: candidate for more efficient rewrite?? (both are ColumnMajor grads!)
  size_t i, j, n = local_fn_grads.numRows(),
    num_nln_eq_con   = optLSqInstance->numNonlinearEqConstraints,
    num_nln_ineq_con = optLSqInstance->numNonlinearIneqConstraints;
  for (i=0; i<n; i++)
    for (j=0; j<num_nln_eq_con; j++)
      grad_g(i, j) = local_fn_grads(i,offset+num_nln_ineq_con+j);
  for (i=0; i<n; i++)
    for (j=0; j<num_nln_ineq_con; j++)
      grad_g(i, j+num_nln_eq_con) = local_fn_grads(i,offset+j);
}
Example #30
void CSpace::allocate_coordinates(RealMatrix& coordinates) const
{
    coordinates.resize(nb_states(),element_type().dimension());
}