RealMatrixSparse HyperbolicEquationUpwindRHSFunction::evaldFdY(Real time, RealVector& y){
	///
	/// this is not the exact Jacobian!
	///

	Integer y_size = y.size();
	RealMatrixSparse J(y_size,y_size);

	Integer col = 0;
	Real epsi = 1.e-3;
	Real iepsi = 1.0 / epsi;
	RealVector F = evalF(time,y);

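	// Build the Jacobian one column at a time: perturb component j and take a forward difference.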
	for (Integer j = 0; j < y_size; ++j){

		RealVector yd = y;
		yd(j) = yd(j) + epsi;

		RealVector Fd = evalF(time,yd);

		for(Integer i=0; i< Fd.size(); i++){
			J.insert(i,col) = (Fd[i] - F[i]) * iepsi;
		}
		col = col + 1;
	}

	return J;
}
Example #2
LPSolver::LPSolver(const RealMatrix& _A, const RealVector& _b, const RealVector& _c)
{
    N = B = 0;
    
    eps = 1e-6;
    
    m = _b.size();
    n = _c.size();
    D = RealMatrix(m+2, n+2);

    B = new int[m];
    N = new int[n+1];
    
    // Copy the constraint matrix A into the top-left block of the simplex tableau.
    for(int i=0; i<m; ++i)
        for(int j=0; j<n; ++j)
            D(i,j) = _A(i,j);
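    // Slack variables form the initial basis; column n is the auxiliary column, column n+1 holds the right-hand side.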
    for(int i=0; i<m; ++i)
    {
        B[i] = n+i;
        D(i,n) = -1;
        D(i,n+1) = _b(i);
    }
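    // The structural variables start out nonbasic; the objective row stores -c.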
    for(int j=0; j<n; ++j)
    {
        N[j] = j;
        D(m,j) = -_c(j);
    }
    N[n] = -1;
    D(m+1, n) = 1;
}
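Example #3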
RealMatrixSparse HyperbolicEquationUpwindRHSFunction::evaldFdY(Real time, RealVector& y, IntegerVector& ref){

	// this is not the exact Jacobian!

	Integer y_size = y.size();
	Integer elem_ref = ref.sum();
	RealMatrixSparse J(elem_ref,elem_ref);

	Integer col = 0;
	Real epsi = 1.e-3;
	Real iepsi = 1.0 / epsi;
	RealVector F = evalF(time,y,ref);

	// Only components flagged in ref are perturbed, so the Jacobian is restricted to the refined subsystem.
	for (Integer j = 0; j < y_size; ++j){
		if(ref(j) == 1){
			RealVector yd = y;
			yd(j) = yd(j) + epsi;

			RealVector Fd = evalF(time,yd,ref);

			for(Integer i = 0; i < Fd.size(); ++i){
				J.insert(i,col) = (Fd[i] - F[i]) * iepsi;
			}
			col = col + 1;
		}
	}

	return J;
}
Example #4
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// evaluate
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RealVector MultiLayerPerceptron::evaluate( const RealVector& x )
{
   /// Assert validity of input.
   assert( ( !m_useBiasNodes && x.size() == m_input.size() ) || ( m_useBiasNodes && x.size() == m_input.size() - 1 ) );

   /// Set input.
   for ( size_t i = 0; i < x.size(); ++i )
   {
      m_input[ i ] = x[ i ];
   }

   /// Simple forward propagation when there are no hidden layers.
   if ( m_y.size() == 0 )
   {
      propagateForward( m_input, m_output, m_weights[ 0 ] );
   }
   else
   {
      /// Propagate from the input layer to the first hidden layer.
      propagateForward( m_input, m_y[ 0 ], m_weights[ 0 ] );
      applyActivationFunc( m_y[ 0 ], m_x[ 0 ] );

      /// Propagate to last hidden layer.
      for ( size_t iHiddenLayer = 0; iHiddenLayer < m_y.size() - 1; ++iHiddenLayer )
      {
         propagateForward( m_x[ iHiddenLayer ], m_y[ iHiddenLayer + 1 ], m_weights[ iHiddenLayer + 1 ] );
         applyActivationFunc( m_y[ iHiddenLayer + 1 ], m_x[ iHiddenLayer + 1 ] );
      }

      /// Propagate to output nodes.
      propagateForward( m_x.back(), m_output, m_weights.back() );
   }

   return m_output;
}
Example #5
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// calcDerivative
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RealVector calcDerivative( const RealVector& x )
{
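   assert( x.size() >= 2 );
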
   RealVector result( x.size() );
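   // Central differences on the interior, one-sided differences at the endpoints (unit grid spacing).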
   for ( size_t i = 1; i < x.size() - 1; ++i )
   {
      result[i] = ( x[i+1] - x[i-1] ) / 2;
   }
   result[0] = x[1] - x[0];
   result[ x.size() - 1 ] = x[ x.size() - 1 ] - x[ x.size() - 2 ];
   return result;
}
Example #6
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// constructor
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
LinearInterpolator::LinearInterpolator( const RealVector& x, const RealVector& y )
{
   assert( x.size() > 0 );
   assert( x.size() == y.size() );

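   // Sort the x values (via SortCache) and apply the same permutation to y.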
   SortCache sc( x );
   m_x = sc.applyTo( x );
   m_y = sc.applyTo( y );

   m_xWidth = m_x.back() - m_x.front();
}
Example #7
///////////////////////////////////////////////////////////////////////////////
//
/// Find zeroes of the polynomial \f$y = \sum_i e_i x^i\f$,
/// where \f$e\f$ is the \f$c^{th}\f$ eigenvector, and put any zeroes
/// that lie in the interval [0:1] into \c k.
///
/// \param c The identifier of the eigenvector to find zeroes of.
//
///////////////////////////////////////////////////////////////////////////////
void AntisymmetricExpFit::fit_exponents(int c) {
  const int N = 2 * ESolver.eigenvectors().rows();
  int i, j;

  // --- Construct the eigenpolynomial
  // --- from the eigenvalues
  // ---------------------------------
  RealVector P(N);	// eigen polynomial coefficients

  if(provable) {
    // --- PA = P_q(x) - x^{N/2} P_q(x^{-1})
    // --- P = PA/(1-x)
    RealVector PA(N+1);	// eigen polynomial coefficients
    PA(N/2) = 0.0;
    PA.topRows(N/2) = ESolver.eigenvectors().col(c).real();
    PA.bottomRows(N/2) = -ESolver.eigenvectors().col(c).real().reverse();
    P(0) = PA(0);
    for(i = 1; i<P.size(); ++i) {
      P(i) = P(i-1) + PA(i);
    }
  } else {
    P.topRows(N/2) = ESolver.eigenvectors().col(c).real();
    P.bottomRows(N/2) = ESolver.eigenvectors().col(c).real().reverse();
  }

  RealVector Q;		// eigenpolynomial with changed variable
  RealVector Z;		// scaled positions of zeroes
  long double alpha;    // scaling factor of polynomial
  long double s;	// scale
  k.resize(c);
  i = 0;
  alpha = 2.0/3.0;
  if(P.size() > 64) {
    while(i < c && alpha > 1e-8) {
      change_variable(P,alpha,Q);
      j = Q.size()-1;
      s = pow(2.0,j);
      while(j && fabs(Q(j))/s < 1e-18) {
        s /= 2.0;
        --j;
      }
      Q.conservativeResize(j+1);
      find_zeroes(Q, -0.5, 0.5, Z);
      for(j = Z.size()-1; j >= 0; --j) {
        s = 1.0 - alpha*(1.0 - Z(j));
        k(i) = s;
        ++i;
      }
      alpha /= 3.0;
    }
  } else {
    find_zeroes(P, 0.0, 1.0, k);
  }
}
Example #8
int Sample(const RealVector& distribution)
{
	// RAND_MAX + 1 can overflow as an int, so force floating-point arithmetic.
	double v = 1.0*rand()/(RAND_MAX + 1.0), sum = 0;

	for(int i = 0; i < distribution.size(); ++i)
	{
		if(v > sum && v <= sum + distribution[i])
			return i;
		sum += distribution[i];
	}
	return distribution.size();
}
Example #9
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// propagateFwd
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void MultiLayerPerceptron::propagateForward( const RealVector& sourceLayer, RealVector& destLayer, const std::vector< RealVector >& weights )
{
   assert( weights.size() == sourceLayer.size() );

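   // Each destination neuron accumulates the weighted sum of all source-layer activations.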
   for ( size_t iDestNeuron = 0; iDestNeuron < destLayer.size(); ++iDestNeuron )
   {
      destLayer[ iDestNeuron ] = 0;
      for ( size_t iSourceNeuron = 0; iSourceNeuron < sourceLayer.size(); ++iSourceNeuron )
      {
         assert( weights[ iSourceNeuron ].size() == destLayer.size() );
         destLayer[ iDestNeuron ] += sourceLayer[ iSourceNeuron ] * weights[ iSourceNeuron ][ iDestNeuron ];
      }
   }
}
Example #10
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// propagateBackward
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void MultiLayerPerceptron::propagateBackward( const RealVector& sourceLayer, RealVector& destLayer, const std::vector< RealVector >& weights )
{
   assert( weights.size() == destLayer.size() );
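   // Note: weights are indexed as [destination][source] here, the transpose of propagateForward.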
   for ( size_t iDestNeuron = 0; iDestNeuron < destLayer.size(); ++iDestNeuron )
   {
      destLayer[ iDestNeuron ] = 0;
      size_t sourceSize = m_useBiasNodes ? sourceLayer.size() - 1 : sourceLayer.size();

      assert( weights[ iDestNeuron ].size() == sourceSize );
      for ( size_t iSourceNeuron = 0; iSourceNeuron < sourceSize; ++iSourceNeuron )
      {
         destLayer[ iDestNeuron ] += sourceLayer[ iSourceNeuron ] * weights[ iDestNeuron ][ iSourceNeuron ];
      }
   }
}
Example #11
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// devPlotExport
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void DevGui::devPlotExport()
{
   RealVector x = Utils::createRangeReal( -2, 2, 100 );
   RealVector y( x.size() );
   for ( size_t i = 0; i < x.size(); ++i )
   {
      y[ i ] = x[ i ] * x[ i ];
   }

   Plotting::Plot2D& plot = gPlotFactory().createPlot( "NewPlot" );
   gPlotFactory().createGraph( x, y );

   plot.setXAxisTitle( "This is the x-axis" );
   plot.setYAxisTitle( "This is the y-axis" );
}
Example #12
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// isEqual
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool isEqual( const RealVector& x, const RealVector& y )
{
   if ( x.size() != y.size() )
   {
      return false;
   }
   for ( size_t i = 0; i < x.size(); ++i )
   {
      if ( x[ i ] != y[ i ] )
      {
         return false;
      }
   }
   return true;
}
Example #13
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// applyActivationFunc
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void MultiLayerPerceptron::applyActivationFunc( const RealVector& neuronActivation, RealVector& neuronResponse )
{
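   assert( neuronResponse.size() == neuronActivation.size() );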
   for ( size_t i = 0; i < neuronActivation.size(); ++i )
   {
      neuronResponse[ i ] = tanh( neuronActivation[ i ] );
   }
}
Example #14
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// calculateGradient
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RealVector IObjectiveFunction::calculateGradient( const RealVector& x, double delta ) const
{
   RealVector result( x.size() );

   double objValAtX = evaluate( x );
   RealVector xPrime( x );

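   // Forward differences: perturb one coordinate, evaluate, then restore it before the next iteration.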
   for ( size_t i = 0; i < x.size(); ++i )
   {
      xPrime[i] += delta;
      result[i] = ( evaluate( xPrime ) - objValAtX ) / delta;
      xPrime[i] -= delta;
   }

   return result;
}
Example #15
 void to_vector(RealVector& result, const RowT& row)
 {
   const Uint row_size = row.size();
   cf3_assert(result.size() >= row_size);
   for(Uint i = 0; i != row_size; ++i)
     result[i] = row[i];
 }
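Example #16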
RealVector HyperbolicEquationUpwindRHSFunction::evalF(Real time, RealVector& y){

	// I use a uniform space grid
	Real dx = (xL - x0) / Nx;
	Integer y_size = y.size();
	RealVector F(y_size);
	F.fill(0.0);

	//! For the first node we impose that the value is equal to the BC value.
	F(0) = bc_func->getValue(time);
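	// Interior nodes: first-order upwind differencing, with the stencil chosen by the sign of the local wave speed.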
	for (Integer i = 1; i <= Nx-2; ++i){
		Real yi = y(i);
		Real a = flux_func->evalFderivative(time,yi);
		if ( a > 0.0 ){
			Real yi_prec = y(i-1);
			F(i) = 1.0/dx  * ( flux_func->evalF(time,yi_prec) - flux_func->evalF(time,yi) );
		}
		else{
			Real yi_succ = y(i+1);
			F(i) = 1.0/dx  * ( flux_func->evalF(time,yi) - flux_func->evalF(time,yi_succ) );
		}
	}

	return F;

}
Example #17
Real GreensFunction3DRadInf::p_corr_table(Real theta, Real r, Real t, RealVector const& RnTable) const
{
    const Index tableSize(RnTable.size());
    if(tableSize == 0)
    {
        return 0.0;
    }

    Real result(0.0);


    Real sin_theta;
    Real cos_theta;
    sincos(theta, &sin_theta, &cos_theta);

    RealVector lgndTable(tableSize);
    gsl_sf_legendre_Pl_array(tableSize-1, cos_theta, &lgndTable[0]);

    const Real p(funcSum_all(boost::bind(&GreensFunction3DRadInf::
                                            p_corr_n,
                                            this,
                                            _1, RnTable, lgndTable),
                               tableSize));

    result = - p * sin_theta;

    result /= 4.0 * M_PI * sqrt(r * r0);

    return result;
}
Example #18
///////////////////////////////////////////////////////////////////////////////
//
/// Finds the zeroes of P(x) in the domain \f$a < x \le b\f$, putting the
/// results into Z.
//
///////////////////////////////////////////////////////////////////////////////
void AntisymmetricExpFit::find_zeroes(RealVector &P, const Real &a, const Real &b, RealVector &Z) {
  const int N = P.size();
  RealMatrix cm(N-1,N-1);
  int i,j;

  // --- Form the companion matrix
  // -----------------------------
  cm.fill(0.0);
  for(j=0; j<N-1; ++j) {
    cm(0,j) = -P(N-2-j)/P(N-1);
  }
  for(i=1; i<N-1; ++i) {
    cm(i,i-1) = 1.0;
  }

  // --- find eigenvalues of
  // --- companion matrix and
  // --- extract roots in [0:1]
  // --------------------------
  EigenSolver<RealMatrix> roots(cm,false);

  Z.resize(N);
  i = 0;
  for(j = 0; j<roots.eigenvalues().size(); ++j) {
    if(fabs(roots.eigenvalues()(j).imag()) < 1e-15 &&
       roots.eigenvalues()(j).real() > a &&
       roots.eigenvalues()(j).real() <= b) {
      Z(i) = roots.eigenvalues()(j).real();
      ++i;
    }
  }
  Z.conservativeResize(i);
}
Example #19
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// scale
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void scale( RealVector& x, double lambda )
{
   for ( size_t i = 0; i < x.size(); ++i )
   {
      x[ i ] *= lambda;
   }
}
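Example #20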
void TRBDF2SolverNewtonFunction::evalF(RealVector& Z, RealVector& F){

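	// If every component is flagged in ref, work on the full state; otherwise Z holds only the
	// flagged components and must be prolonged to the full grid first.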
	Bool refining = !ref.all();

	if(refining == false){
		Integer n = Z.size();
		RealVector Y(n);
		Y.fill(0.0);
		F.fill(0.0);

		Y = ya + d * Z;

		RealVector f  = fun->evalF(tnp,Y);

		F = Z  - dt * f;
	}
	else{

		Integer n = ref.size();
		RealVector Y(n);
		Y.fill(0.0);
		F.fill(0.0);

		RealVector Z_long = VectorUtility::vectorProlong(Z,ref);

		Y = ya + d * Z_long;

		RealVector f  = fun->evalF(tnp,Y,ref);

		F = Z  - dt * f;
	}
}
Example #21
 bool LibSVMDataModel::isSupportVector(const RealVector& alphas) {
     for (size_t i = 0; i < alphas.size(); i++) {
         if (alphas[i] != 0)
             return true;
     }
     return false;
 }
Example #22
Real GreensFunction3DRadInf::ip_corr_table(Real theta, Real r,
                                            Real t, RealVector const& RnTable) const
{
    const Index tableSize(RnTable.size());
    if(tableSize == 0)
    {
        return 0.0;
    }

    const Real cos_theta(cos(theta));
    
    // lgndTable is offset by 1: lgndTable[0] -> n = -1

    RealVector lgndTable(tableSize + 2);
    lgndTable[0] = 1.0; // n = -1
    gsl_sf_legendre_Pl_array(tableSize, cos_theta, &lgndTable[1]);

    const Real p(funcSum_all(boost::bind(&GreensFunction3DRadInf::
                                            ip_corr_n,
                                            this,
                                            _1, RnTable, lgndTable),
                               tableSize));

    const Real result(- p / (4.0 * M_PI * sqrt(r * r0)));
    return result;
}
Example #23
std::size_t GlobalNumberingNodes::hash_value(const RealVector& coords)
{
  std::size_t seed=0;
  for (Uint i=0; i<coords.size(); ++i)
    boost::hash_combine(seed,coords[i]);
  return seed;
}
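Example #24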
inline void copy(const boost_constrow_t& vector, RealVector& realvector)
{
  cf3_assert(realvector.size() == vector.size());
  for (Uint row=0; row<realvector.rows(); ++row)
  {
    realvector[row] = vector[row];
  }
}
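Example #25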
inline void copy(const RealVector& realvector, boost_row_t& vector)
{
  cf3_assert(vector.size() == realvector.size());
  for (Uint row=0; row<realvector.rows(); ++row)
  {
    vector[row] = realvector[row];
  }
}
Example #26
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// sumElements
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
double sumElements( const RealVector& x )
{
   double sum = 0;
   for ( size_t i = 0; i < x.size(); ++i )
   {
      sum += x[ i ];
   }
   return sum;
}
Example #27
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// proposeNew
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RealVector McmcOptimiser::proposeNew( const RealVector& x, double stepSize )
{
   RealVector result( x );
   for ( size_t i = 0; i < x.size(); ++i )
   {
      result[ i ] += stepSize * m_random.uniform( -1, 1 );
   }
   return result;
}
Example #28
// Set the levels radiating from the level at the given point, 
// with the given distance between levels.
//
void Eigenvalue_Contour::set_levels_from_point(const FluxFunction *f, const AccumulationFunction *a, GridValues &g,
                                               int fam, const RealVector &p, double delta_l){
    g.fill_eigenpairs_on_grid(f, a);
    gv = &g;

    family = fam;

    levels.clear();

    double JF[4], JG[4];

    f->fill_with_jet(p.size(), ((RealVector)p).components(), 1, 0, JF, 0);
    a->fill_with_jet(p.size(), ((RealVector)p).components(), 1, 0, JG, 0);

    std::vector<eigenpair> e;
    Eigen::eig(p.size(), JF, JG, e);
    double level = e[family].r;

    double now_level = level - delta_l;

    double min_lambda, max_lambda;
    find_minmax_lambdas(family, min_lambda, max_lambda);

    // Downwards...
    while (now_level >= min_lambda){
        levels.push_back(now_level);
        now_level -= delta_l;
    }

    now_level = level;

    // Upwards
    while (now_level <= max_lambda){
        levels.push_back(now_level);
        now_level += delta_l;
    }

    std::sort(levels.begin(), levels.end());
    printf("levels.size() = %d\n", (int)levels.size());

    return;
}
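Example #29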
RealVector HyperbolicEquationRusanovFlux::evalF(Real time, RealVector& y){

	//! I use a uniform space grid
	Real dx = (xL - x0) / Nx;
	Integer y_size = y.size();
	RealVector F(y_size);
	F.fill(0.0);

	//! At the first node the boundary-condition value supplies the missing left state.
	Integer i = 0;
	Real y0 = bc_func->getValue(time);
	Real yi_succ = y(i+1);

	Real f_deriv_i = fabs(flux_func->evalFderivative(time,y0));
	Real f_deriv_isucc = fabs(flux_func->evalFderivative(time,yi_succ));

	Real f_prev = flux_func->evalF(time,y0);
	Real f_succ = 0.5*(flux_func->evalF(time,y0) + flux_func->evalF(time,yi_succ) - std::max(f_deriv_i, f_deriv_isucc)*(yi_succ - y0));

	F(0) = -1.0/dx * (f_succ - f_prev);

	for (Integer i = 1; i < Nx-1; ++i){

		//! We use a finite volume method with Rusanov flux

		Real yi = y(i);
		Real yi_succ = y(i+1);
		Real yi_prev = y(i-1);

		Real f_deriv_i = fabs(flux_func-> evalFderivative(time,yi));
		Real f_deriv_isucc = fabs(flux_func-> evalFderivative(time,yi_succ));
		Real f_deriv_iprev = fabs(flux_func-> evalFderivative(time,yi_prev));

		Real f_prev =  0.5*(flux_func->evalF(time,yi_prev) + flux_func->evalF(time,yi) - (std::max( f_deriv_i, f_deriv_iprev))*(yi-yi_prev));
		Real f_succ =  0.5*(flux_func->evalF(time,yi) + flux_func->evalF(time,yi_succ) - ( std::max( f_deriv_i, f_deriv_isucc))*(yi_succ-yi));

		F(i) = - 1.0/dx  * (f_succ - f_prev );
	}

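	//! Last node: the outgoing flux is evaluated directly at the boundary state y(Nx-1).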
	Real yn = y(Nx-1);
	Real yi_prev = y(Nx-2);

	f_deriv_i = fabs(flux_func-> evalFderivative(time,yn));
	Real f_deriv_iprev = fabs(flux_func-> evalFderivative(time,yi_prev));

	f_prev =  0.5*(flux_func->evalF(time,yi_prev) + flux_func->evalF(time,yn) - (std::max( f_deriv_i, f_deriv_iprev))*(yn-yi_prev));
	f_succ =  flux_func->evalF(time,yn);

	F(Nx-1) = - 1.0/dx  * (f_succ - f_prev );

	return F;

}
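Example #30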
    /* Returns i'th r-independent term of p_int_r_i.
       Term is created if not in table. */
    Real get_p_int_r_Table_i( uint& i, Real const& t, RealVector& table) const
    {
        if( i >= table.size() )
        {
            calculate_n_roots( i+1 );
            create_p_int_r_Table( t, table );
        }

        return table[i];
    }