Example #1
void FactorAnalysisStat::getXEstimate(){
	if (verbose) cout << "(FactorAnalysisStat) Compute X Estimate "<<endl;
	RealVector <double> AUX;
	AUX.setSize(_rang);
	XLine *pline;
	String *pFile;

	_matX.setAllValues(0.0);

	double *X=_matX.getArray();	
	double *U=_matU.getArray();
	double *S_X_h=_matS_X_h.getArray();
	double *aux=AUX.getArray();
	double *super_invvar=_super_invvar.getArray();
	
	fileList.rewind();
	while((pline=fileList.getLine())!=NULL) {
		while((pFile=pline->getElement())!=NULL) {
		    unsigned long sent=_ndxTable.sessionNb(*pFile);
			AUX.setAllValues(0.0);
			for(unsigned long i=0;i<_rang;i++)
				for(unsigned long k=0;k<_supervsize;k++) 
					aux[i]+= U[k*_rang+i]*super_invvar[k]*S_X_h[sent*_supervsize+k];	
			double *l_h_inv=_l_h_inv[sent].getArray();	
			for(unsigned long i=0;i<_rang;i++)
				for(unsigned long k=0;k<_rang;k++) 
					X[sent*_rang+i]+=l_h_inv[i*_rang+k]*aux[k];
		}
	}
}
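Per session h, the loops above implement the point estimate x_h = L_h^-1 * U^T * Sigma^-1 * s_h, where U is the factor loading matrix (_matU), Sigma^-1 the diagonal inverse variance (_super_invvar) and s_h the session statistic vector (_matS_X_h). A minimal standalone sketch of the same arithmetic over plain arrays (names are illustrative, not part of the ALIZE API):

// Sketch of the x-estimate above, assuming row-major storage:
// aux = U^T * diag(inv_var) * s, then x = L_inv * aux.
#include <cstddef>
#include <vector>

void estimateX(const std::vector<double>& U,        // supervsize x rank, row-major
               const std::vector<double>& inv_var,  // supervsize
               const std::vector<double>& s,        // supervsize
               const std::vector<double>& L_inv,    // rank x rank, row-major
               std::vector<double>& x,              // rank (output)
               std::size_t rank, std::size_t supervsize)
{
    std::vector<double> aux(rank, 0.0);
    for (std::size_t i = 0; i < rank; ++i)
        for (std::size_t k = 0; k < supervsize; ++k)
            aux[i] += U[k*rank + i] * inv_var[k] * s[k];
    x.assign(rank, 0.0);
    for (std::size_t i = 0; i < rank; ++i)
        for (std::size_t k = 0; k < rank; ++k)
            x[i] += L_inv[i*rank + k] * aux[k];
}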
Example #2
LPSolver::LPSolver(const RealMatrix& _A, const RealVector& _b, const RealVector& _c)
{
    N = B = 0;
    
    eps = 1e-06;
    
    m = _b.size();
    n = _c.size();
    D = RealMatrix(m+2, n+2);

    B = new int[m];
    N = new int[n+1];
    
    // copy matrix
    for(int i=0; i<m; ++i)
        for(int j=0; j<n; ++j)
            D(i,j) = _A(i,j);
    for(int i=0; i<m; ++i)
    {
        B[i] = n+i;
        D(i,n) = -1;
        D(i,n+1) = _b(i);
    }
    for(int j=0; j<n; ++j)
    {
        N[j] = j;
        D(m,j) = -_c(j);
    }
    N[n] = -1;
    D(m+1, n) = 1;
}
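A hypothetical usage sketch for the simplex tableau built above; only the constructor appears in this example, so the solve() call below is an assumption for illustration:

// Hypothetical usage: maximize c^T x subject to A x <= b, x >= 0.
RealMatrix A(2, 2);
A(0,0) = 1; A(0,1) = 1;     // x0 + x1 <= 4
A(1,0) = 2; A(1,1) = 1;     // 2*x0 + x1 <= 5
RealVector b(2), c(2);
b(0) = 4; b(1) = 5;
c(0) = 3; c(1) = 2;
LPSolver lp(A, b, c);
// RealVector x; double z = lp.solve(x);   // hypothetical API, not shown above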
void TRBDF2SolverNewtonFunction::evalF(RealVector& Z, RealVector& F){

	// Refinement is active when not every component of ref is set.
	Bool refining = (ref.all() == false);

	if(!refining){
		Integer n = Z.size();
		RealVector Y(n);
		Y.fill(0.0);
		F.fill(0.0);

		// Newton residual for the stage equation: F(Z) = Z - dt * f(tnp, ya + d*Z).
		Y = ya + d * Z;

		RealVector f  = fun->evalF(tnp,Y);

		F = Z  - dt * f;
	}
	else{

		Integer n = ref.cast<Real>().size();
		RealVector Y(n);
		Y.fill(0.0);
		F.fill(0.0);

		// Prolong the reduced Z back to the full grid before evaluating f.
		RealVector Z_long = VectorUtility::vectorProlong(Z,ref);

		Y = ya + d * Z_long;

		RealVector f  = fun->evalF(tnp,Y,ref);

		F = Z  - dt * f;
	}
}
///////////////////////////////////////////////////////////////////////////////
//
/// Finds the real zeroes of the polynomial \f$P(x) = \sum_i P_i x^i\f$ in the
/// domain \f$a < x <= b\f$, putting the results into Z
//
///////////////////////////////////////////////////////////////////////////////
void AntisymmetricExpFit::find_zeroes(RealVector &P, const Real &a, const Real &b, RealVector &Z) {
  const int N = P.size();
  RealMatrix cm(N-1,N-1);
  int i,j;

  // --- Form the companion matrix
  // -----------------------------
  cm.fill(0.0);
  for(j=0; j<N-1; ++j) {
    cm(0,j) = -P(N-2-j)/P(N-1);
  }
  for(i=1; i<N-1; ++i) {	
    cm(i,i-1) = 1.0;
  }

  // --- find eigenvalues of
  // --- companion matrix and
  // --- extract real roots in (a, b]
  // --------------------------
  EigenSolver<RealMatrix> roots(cm,false);

  Z.resize(N);
  i = 0;
  for(j = 0; j<roots.eigenvalues().size(); ++j) {
    if(fabs(roots.eigenvalues()(j).imag()) < 1e-15 &&
       roots.eigenvalues()(j).real() > a &&
       roots.eigenvalues()(j).real() <= b) {
      Z(i) = roots.eigenvalues()(j).real();
      ++i;
    }
  }
  Z.conservativeResize(i);
}
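The same companion-matrix technique in a self-contained Eigen sketch (illustrative only, not part of the class above): the eigenvalues of the companion matrix of P(x) = x^2 - 3x + 2 are its roots, 1 and 2.

// Standalone companion-matrix root finding with Eigen.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXd P(3);
  P << 2.0, -3.0, 1.0;                      // P(x) = 2 - 3x + x^2
  const int N = P.size();
  Eigen::MatrixXd cm = Eigen::MatrixXd::Zero(N-1, N-1);
  for (int j = 0; j < N-1; ++j)
    cm(0, j) = -P(N-2-j) / P(N-1);          // top row: -a_{N-2-j}/a_{N-1}
  for (int i = 1; i < N-1; ++i)
    cm(i, i-1) = 1.0;                       // subdiagonal of ones
  Eigen::EigenSolver<Eigen::MatrixXd> es(cm, false);
  std::cout << es.eigenvalues() << std::endl;  // the roots 1 and 2 (order not guaranteed)
  return 0;
}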
Example #5
RealVector realVector( double x0, double x1 )
{
   RealVector result;
   result.push_back( x0 );
   result.push_back( x1 );
   return result;
}
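These small factory overloads just pack their arguments into a vector, e.g.:

RealVector v = realVector( 1.0, 2.0 );   // v[0] == 1.0, v[1] == 2.0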
RealMatrixSparse HyperbolicEquationUpwindRHSFunction::evaldFdY(Real time, RealVector& y){
	///
	/// this is not the exact Jacobian!
	///

	Integer y_size = y.size();
	RealMatrixSparse J(y_size,y_size);

	Integer col = 0;
	Real epsi = 1.e-3;          // forward-difference step
	Real iepsi = 1.0 / epsi;
	RealVector F = evalF(time,y);

	for (Integer j = 0; j < y_size; ++j){

		RealVector yd = y;
		yd(j) = yd(j) + epsi;

		RealVector Fd = evalF(time,yd);

		for(Integer i=0; i< Fd.size(); i++){
			J.insert(i,col) = (Fd[i] - F[i]) * iepsi;
		}
		col = col + 1;
	}

	return J;
}
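The approximation used above is the standard forward difference, J(i,j) ≈ (F_i(y + eps*e_j) - F_i(y)) / eps. A minimal self-contained sketch of the same idea over std::vector (illustrative, independent of the solver classes here):

// Forward-difference Jacobian sketch for an arbitrary F: R^n -> R^n.
#include <functional>
#include <vector>

using Vec = std::vector<double>;

std::vector<Vec> numericJacobian(const std::function<Vec(const Vec&)>& F,
                                 const Vec& y, double eps = 1e-3)
{
    const std::size_t n = y.size();
    Vec F0 = F(y);
    std::vector<Vec> J(F0.size(), Vec(n, 0.0));
    for (std::size_t j = 0; j < n; ++j) {
        Vec yd = y;
        yd[j] += eps;                            // perturb one component
        Vec Fd = F(yd);
        for (std::size_t i = 0; i < F0.size(); ++i)
            J[i][j] = (Fd[i] - F0[i]) / eps;     // forward difference
    }
    return J;
}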
RealMatrixSparse HyperbolicEquationUpwindRHSFunction::evaldFdY(Real time, RealVector& y,IntegerVector& ref){

	// this is not the exact Jacobian!

	Integer y_size = y.size();
	Integer elem_ref = ref.sum();
	RealMatrixSparse J(elem_ref,elem_ref);

	Integer col = 0;
	Real epsi = 1.e-3;          // forward-difference step
	Real iepsi = 1.0 / epsi;
	RealVector F = evalF(time,y,ref);

	for (Integer j = 0; j < y_size; ++j){
		if(ref(j) == 1){
			// Perturb only the components flagged for refinement.
			RealVector yd = y;
			yd(j) = yd(j) + epsi;

			RealVector Fd = evalF(time,yd,ref);

			for(Integer i=0; i< Fd.size(); i++){
				J.insert(i,col) = (Fd[i] - F[i]) * iepsi;
			}
			col = col + 1;
		}
	}

	return J;
}
Example #8
void FactorAnalysisStat::substractChannelStats(){
	if (verbose) cout <<"(FactorAnalysisStat) Compute and Subtract Channel FA Stats... "<<endl;	
	RealVector <double> UX;
	UX.setSize(_supervsize);
	UX.setAllValues(0.0);
	double* ux=UX.getArray();
		
	// Compute Occupations and Statistics
	unsigned long loc=0;
	unsigned long sent=0; 
	XLine *pline; String *pFile; 
	fileList.rewind();
	
	double *super_mean=_super_mean.getArray();
	double *N_h=_matN_h.getArray(); 
	double *S_X=_matS_X.getArray();

	while((pline=fileList.getLine())!=NULL) { 		
		while((pFile=pline->getElement())!=NULL) {
			this->getUX(UX,*pFile);
			for(unsigned long k=0;k<_supervsize;k++) 
				ux[k]+=super_mean[k];
			
			for(unsigned long k=0;k<_mixsize;k++)
				for (unsigned long i=0;i<_vsize;i++)
					S_X[loc*_supervsize+(k*_vsize+i)]-= N_h[sent*_mixsize+k]*ux[i+k*_vsize];
			sent++;
		}
		loc++;
	}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// evaluate
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RealVector MultiLayerPerceptron::evaluate( const RealVector& x )
{
   /// Assert validity of input.
   assert( ( !m_useBiasNodes && x.size() == m_input.size() ) || ( m_useBiasNodes && x.size() == m_input.size() - 1 ) );

   /// Set input.
   for ( size_t i = 0; i < x.size(); ++i )
   {
      m_input[ i ] = x[ i ];
   }

   /// Simple forward propagation when there are no hidden layers.
   if ( m_y.size() == 0 )
   {
      propagateForward( m_input, m_output, m_weights[ 0 ] );
   }
   else
   {
      /// Propagate from input node to first hidden layer.
      propagateForward( m_input, m_y[ 0 ], m_weights[ 0 ] );
      applyActivationFunc( m_y[ 0 ], m_x[ 0 ] );

      /// Propagate to last hidden layer.
      for ( size_t iHiddenLayer = 0; iHiddenLayer < m_y.size() - 1; ++iHiddenLayer )
      {
         propagateForward( m_x[ iHiddenLayer ], m_y[ iHiddenLayer + 1 ], m_weights[ iHiddenLayer + 1 ] );
         applyActivationFunc( m_y[ iHiddenLayer + 1 ], m_x[ iHiddenLayer + 1 ] );
      }

      /// Propagate to output nodes.
      propagateForward( m_x.back(), m_output, m_weights.back() );
   }

   return m_output;
}
inline void copy(const RealVector& realvector, boost_row_t& vector)
{
  cf3_assert(vector.size() == realvector.size());
  for (Uint row=0; row<realvector.rows(); ++row)
  {
    vector[row] = realvector[row];
  }
}
inline void copy(const boost_constrow_t& vector, RealVector& realvector)
{
  cf3_assert(realvector.size() == vector.size());
  for (Uint row=0; row<realvector.rows(); ++row)
  {
    realvector[row] = vector[row];
  }
}
Example #12
double evaluator(RealVector &v) {
	RealVector obj;
	
	for (int i = 0; i < 3; i++) {
		obj.push_back(5.0);
	}
	
	return v.distance(obj);
}
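Since obj holds three entries of 5.0, the evaluator scores a candidate by its Euclidean distance from the point (5, 5, 5); for instance (assuming the same RealVector type as above):

RealVector v;
v.push_back( 5.0 );
v.push_back( 5.0 );
v.push_back( 5.0 );
double score = evaluator( v );   // 0.0: the candidate coincides with the target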
Example #13
// Compute supervector of client M_s_h=M+Dy_s+Ux and get a model
void FactorAnalysisStat::getSpeakerModel(MixtureGD & M,String& file) {	
	RealVector <double> Sp;
	Sp.setSize(_supervsize);
	RealVector <double> ux;
	ux.setSize(_supervsize);
	this->getUX(ux,file);
	this->getMplusDY(Sp,file);
	for (unsigned long i=0;i<_supervsize;i++)
		Sp[i]+=ux[i];
	svToModel(Sp,M);
}
Example #14
RealVector realVector( double x0, double x1, double x2, double x3, double x4 )
{
   RealVector result;
   result.push_back( x0 );
   result.push_back( x1 );
   result.push_back( x2 );
   result.push_back( x3 );
   result.push_back( x4 );
   return result;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// constructor
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
LinearInterpolator::LinearInterpolator( const RealVector& x, const RealVector& y )
{
   assert( x.size() > 0 );
   assert( x.size() == y.size() );

   SortCache sc( x );
   m_x = sc.applyTo( x );
   m_y = sc.applyTo( y );

   m_xWidth = m_x.back() - m_x.front();
}
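A hypothetical usage sketch; only the constructor is shown above, so the evaluation call below is an assumption:

RealVector x = realVector( 0.0, 1.0 );      // factory from Example #5
RealVector y = realVector( 0.0, 2.0 );
LinearInterpolator interp( x, y );
// double v = interp.interpolate( 0.5 );    // hypothetical method; would give 1.0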
Example #16
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// calcDerivative
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
RealVector calcDerivative( const RealVector& x )
{
   assert( x.size() >= 2 );   /// The end-point differences below need at least two samples.
   RealVector result( x.size() );
   /// Central differences in the interior; unit sample spacing is assumed.
   for ( size_t i = 1; i < x.size() - 1; ++i )
   {
      result[i] = ( x[i+1] - x[i-1] ) / 2;
   }
   result[0] = x[1] - x[0];
   result[ x.size() - 1 ] = x[ x.size() - 1 ] - x[ x.size() - 2 ];
   return result;
}
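A worked instance, assuming unit sample spacing and the realVector factory from Example #14: for samples of f(t) = t^2 at t = 0..4 the exact derivative is 2t, and the scheme reproduces it in the interior.

RealVector samples = realVector( 0.0, 1.0, 4.0, 9.0, 16.0 );   // f(t) = t^2
RealVector deriv = calcDerivative( samples );
// deriv == { 1, 2, 4, 6, 7 }: central differences inside, one-sided at the ends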
///////////////////////////////////////////////////////////////////////////////
//
/// Find zeroes of the polynomial \f$y = \sum_i e_i x^i\f$,
/// where \f$e\f$ is the \f$c^{th}\f$ eigenvector, and put any zeroes
/// that lie in the interval [0:1] into \c k.
///
/// \param c The identifier of the eigenvector to find zeroes of.
//
///////////////////////////////////////////////////////////////////////////////
void AntisymmetricExpFit::fit_exponents(int c) {
  const int N = 2*ESolver.eigenvectors().rows();
  int       i,j;

  // --- Construct the eigenpolynomial
  // --- from the eigenvalues
  // ---------------------------------
  RealVector P(N);	// eigen polynomial coefficients

  if(provable) {
    // --- PA = P_q(x) - x^{N/2} P_q(x^{-1})
    // --- P = PA/(1-x)
    RealVector PA(N+1);	// eigen polynomial coefficients
    PA(N/2) = 0.0;
    PA.topRows(N/2) = ESolver.eigenvectors().col(c).real();
    PA.bottomRows(N/2) = -ESolver.eigenvectors().col(c).real().reverse();
    P(0) = PA(0);
    for(i = 1; i<P.size(); ++i) {
      P(i) = P(i-1) + PA(i);
    }
  } else {
    P.topRows(N/2) = ESolver.eigenvectors().col(c).real();
    P.bottomRows(N/2) = ESolver.eigenvectors().col(c).real().reverse();
  }

  RealVector Q;		// eigenpolynomial with changed variable
  RealVector Z;		// scaled positions of zeroes
  long double alpha;    // scaling factor of polynomial
  long double s;	// scale
  k.resize(c);
  i = 0;
  alpha = 2.0/3.0;
  if(P.size() > 64) {
    while(i < c && alpha > 1e-8) {
      change_variable(P,alpha,Q);
      j = Q.size()-1;
      s = pow(2.0,j);
      while(j && fabs(Q(j))/s < 1e-18) {
        s /= 2.0;
        --j;
      }
      Q.conservativeResize(j+1);
      find_zeroes(Q, -0.5, 0.5, Z);
      for(j = Z.size()-1; j>=0; --j) {
        s = 1.0 - alpha*(1.0 - Z(j));
        k(i) = s;
        ++i;
      }
      alpha /= 3.0;
    }
  } else {
    find_zeroes(P, 0.0, 1.0, k);
  }
}
Example #18
void *Uthread(void *threadarg) {
	struct Uthread_data *my_data;
	my_data = (struct Uthread_data *) threadarg;
	// Get data
	double *U = my_data->U;
	double *N_h=my_data->N_h;	
	double *S_X_h=my_data->S_X_h;	
	double *X=my_data->X;
	unsigned long _rang=my_data->rang;
	unsigned long _supervsize=my_data->sv;	
	unsigned long _nb_sent=my_data->nbsent;
	unsigned long _vsize=my_data->vsize;
	unsigned long _mixsize=my_data->mixsize;
	unsigned long idxBottom=my_data->idxBottom;
	unsigned long idxUp=my_data->idxUp;
	RefVector <DoubleSquareMatrix>& _l_h_inv=*(my_data->L);

	// Routine
	DoubleSquareMatrix L(_rang);
	DoubleSquareMatrix L_Inv(_rang);
	RealVector <double> R;
	R.setSize(_rang);
	double *RV=R.getArray();
	double *LV=L.getArray();	
	for (unsigned long g=idxBottom;g<idxUp;g++) {
		L.setAllValues(0.0);
		// Estimate LU for the gaussian g
		for(unsigned long j=0;j<_nb_sent;j++){
			double *l_h_inv=_l_h_inv[j].getArray();
			for(unsigned long k=0;k<_rang;k++)
				for(unsigned long l=0;l<_rang;l++) 
					LV[k*_rang+l]+=(l_h_inv[k*_rang+l]+X[j*_rang+k]*X[j*_rang+l])*N_h[j*_mixsize+g];
		}
		L.invert(L_Inv); 
		double *L_InvV=L_Inv.getArray();	
		
		// Estimate LU for the line lineNum
		for (unsigned long i=0;i<_vsize;i++) {
			unsigned long lineNum=_vsize*g+i;			
			// estimate R for row i
			R.setAllValues(0.0);
			for(unsigned long j=0;j<_nb_sent;j++)
				for(unsigned long k=0;k<_rang;k++)
					RV[k]+=S_X_h[j*_supervsize+lineNum]*X[j*_rang+k];
			// estimate row i of U
			for(unsigned long j=0;j<_rang;j++)
				for(unsigned long k=0;k<_rang;k++) 
					U[lineNum*_rang+j]+=L_InvV[j*_rang+k]*RV[k];
		}
	}
	pthread_exit((void*) 0);
	return (void*)0;   // unreachable after pthread_exit; silences compiler warnings
}
RealVector <double> TopGauss::get(MixtureGD & UBM,FeatureServer &fs,Config & config){
	RealVector <double> llkv;
	for (unsigned long i=0;i<fs.getSourceCount();i++) {
		String filename=fs.getNameOfASource(i);
		if (verbose) cout << "(TopGauss::get on FeatureServer) File ["<<filename<<"] ... ";	
		this->read(filename,config); // this should be removed!! but when passing a feature server things get worse			
		llkv.addValue(get(UBM,fs,filename,config));
		if (verbose) cout <<_nbgcnt/_nt <<" per frames LLK="<<llkv[i]<<endl;	
	}
	return llkv;
}
void SharedSurfpackApproxData::
merge_variable_arrays(const RealVector& cv,  const IntVector& div,
		      const RealVector& drv, RealArray& ra)
{
  size_t num_cv = cv.length(), num_div = div.length(), num_drv = drv.length(),
         num_v  = num_cv + num_div + num_drv;
  ra.resize(num_v);
  if (num_cv)   copy_data_partial(cv,  ra, 0);
  if (num_div) merge_data_partial(div, ra, num_cv);
  if (num_drv)  copy_data_partial(drv, ra, num_cv+num_div);
}
Example #21
    std::ostream & operator<<(std::ostream &os, const Options& opt) {
        bool hasValue = false;
        for(Options::BoolMap::const_iterator iter = opt._boolMap.begin(); iter != opt._boolMap.end(); ++iter) {
            if (!hasValue) {
                os << "Options:";
                hasValue = true;
            }
            if (iter->first[0] != '_') {
                os << " " << (iter->second ? "+" : "-") << iter->first;
            }
        }
        if (hasValue) {
            os << endl;
            hasValue = false;
        }

        for(Options::IntMap::const_iterator iter = opt._intMap.begin(); iter != opt._intMap.end(); ++iter) {
            os << iter->first << " int " << iter->second << endl;
        }
        for(Options::RealMap::const_iterator iter = opt._realMap.begin(); iter != opt._realMap.end(); ++iter) {
            os << iter->first << " real " << iter->second << endl;
        }
        for(Options::StringMap::const_iterator iter = opt._stringMap.begin(); iter != opt._stringMap.end(); ++iter) {
            os << iter->first << " string " << iter->second << endl;
        }

        for(Options::IntVectorMap::const_iterator iter = opt._intVectorMap.begin(); iter != opt._intVectorMap.end(); ++iter) {
            IntVector vector = iter->second;
            os << iter->first << " ints";
            for (int i = 0; i < vector.size(); i++) {
                os << " " << vector[i];
            }
            os << endl;
        }

        for(Options::RealVectorMap::const_iterator iter = opt._realVectorMap.begin(); iter != opt._realVectorMap.end(); ++iter) {
            RealVector vector = iter->second;
            os << iter->first << " reals";
            for (int i = 0; i < vector.size(); i++) {
                os << " " << vector[i];
            }
            os << endl;
        }

        for(Options::StringVectorMap::const_iterator iter = opt._stringVectorMap.begin(); iter != opt._stringVectorMap.end(); ++iter) {
            StringVector vector = iter->second;
            os << iter->first << " strings " << vector.size() << endl;
            for (int i = 0; i < vector.size(); i++) {
                os << vector[i] << endl;
            }
        }
        return os;
    }
int Sample(RealVector distribution)
{
	double v=1.0*rand()/(RAND_MAX+1.0),sum=0;   // +1.0 avoids signed overflow and keeps v < 1

	for(int i=0;i<distribution.size();++i)
	{
		if(v>sum && v<=sum+distribution[i])
			return i;
		sum+=distribution[i];
	}
	return distribution.size();   // fallback when the weights sum to less than v
}
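Given weights that sum to 1, Sample returns index i with probability distribution[i]; a quick sketch (assuming RealVector supports push_back as in the factory examples above):

// Inside some function, after seeding the RNG once with srand():
RealVector p;
p.push_back( 0.2 );
p.push_back( 0.5 );
p.push_back( 0.3 );
int idx = Sample( p );   // 0, 1 or 2 with probability 0.2 / 0.5 / 0.3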
Example #23
RealVector realVector( double x0, double x1, double x2, double x3, double x4, double x5, double x6, double x7 )
{
   RealVector result;
   result.push_back( x0 );
   result.push_back( x1 );
   result.push_back( x2 );
   result.push_back( x3 );
   result.push_back( x4 );
   result.push_back( x5 );
   result.push_back( x6 );
   result.push_back( x7 );
   return result;
}
Example #24
void RNNet::weightedParameterDerivative(
	BatchInputType const& patterns, BatchInputType const& coefficients, 
	State const& state, RealVector& gradient
)const{
	//SIZE_CHECK(pattern.size() == coefficients.size());
	InternalState const& s = state.toState<InternalState>();
	gradient.resize(numberOfParameters());
	gradient.clear();
	
	std::size_t numUnits = mpe_structure->numberOfUnits();
	std::size_t numNeurons = mpe_structure->numberOfNeurons();
	std::size_t warmUpLength=m_warmUpSequence.size();
	for(std::size_t b = 0; b != patterns.size(); ++b){
		Sequence const& sequence = s.timeActivation[b];
		std::size_t sequenceLength = s.timeActivation[b].size();
		RealMatrix errorDerivative(sequenceLength,numNeurons);
		errorDerivative.clear();
		//copy errors
		for (std::size_t t = warmUpLength+1; t != sequenceLength; ++t)
			for(std::size_t i = 0; i != outputSize(); ++i)
				errorDerivative(t,i+numNeurons-outputSize())=coefficients[b][t-warmUpLength-1](i);
		
		//backprop through time
		for (std::size_t t = sequence.size()-1; t > 0; t--){
			for (std::size_t j = 0; j != numNeurons; ++j){
				double derivative = mpe_structure->neuronDerivative(sequence[t](j+mpe_structure->inputs()+1));
				errorDerivative(t,j)*=derivative;
			}
			noalias(row(errorDerivative,t-1)) += prod(
				trans(columns(mpe_structure->weights(), inputSize()+1,numUnits)),
				row(errorDerivative,t)
			);
		}
		
		
		//update gradient for batch element i
		std::size_t param = 0;
		for (std::size_t i = 0; i != numNeurons; ++i){
			for (std::size_t j = 0; j != numUnits; ++j){
				if(!mpe_structure->connection(i,j))continue;

				for(std::size_t t=1;t != sequence.size(); ++t)
					gradient(param)+=errorDerivative(t,i) * sequence[t-1](j);
				
				++param;
			}
		}
		//sanity check
		SIZE_CHECK(param == mpe_structure->parameters());
	}
}
void TRBDF2SolverNewtonFunction::evalFJ(RealVector& Z, RealVector& F, RealMatrixSparse& J){

	// Refinement is active when not every component of ref is set.
	Bool refining = (ref.all() == false);

	if(!refining){

		Integer n = Z.size();
		RealVector Y(n);
		Y.fill(0.0);
		F.fill(0.0);

		RealMatrixSparse DZ_DZ(n,n);  //Identity matrix
		DZ_DZ.setIdentity();

		Y = ya + d * Z;

		RealVector f  = fun->evalF(tnp,Y);
		RealMatrixSparse Df_DY = fun->evaldFdY(tnp,Y);


		F = Z     - dt *     f;
		J = DZ_DZ - dt * d * Df_DY;    //J = DF_DZ

	}
	else{

		// Local d presumably shadows the member used in the branch above with the same TR-BDF2 constant.
		Real d = (2 - sqrt(2))/2;

		Integer n = ref.cast<Real>().size();
		RealVector Y(n);
		Y.fill(0);
		F.fill(0);

		Integer elem_ref = ref.sum();
		RealMatrixSparse DZ_DZ;
		DZ_DZ.resize(elem_ref,elem_ref);
		DZ_DZ.setIdentity();


		RealVector Z_long = VectorUtility::vectorProlong(Z,ref);

		Y = ya + d * Z_long;

		RealVector f  = fun->evalF(tnp,Y,ref);
		RealMatrixSparse Df_DY = fun->evaldFdY(tnp,Y,ref);

		F = Z     - dt *     f;
		J = DZ_DZ - dt * d * Df_DY;  //J = DF_DZ
	}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// propagateFwd
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void MultiLayerPerceptron::propagateForward( const RealVector& sourceLayer, RealVector& destLayer, const std::vector< RealVector >& weights )
{
   assert( weights.size() == sourceLayer.size() );

   for ( size_t iDestNeuron = 0; iDestNeuron < destLayer.size(); ++iDestNeuron )
   {
      destLayer[ iDestNeuron ] = 0;
      for ( size_t iSourceNeuron = 0; iSourceNeuron < sourceLayer.size(); ++iSourceNeuron )
      {
         assert( weights[ iSourceNeuron ].size() == destLayer.size() );
         destLayer[ iDestNeuron ] += sourceLayer[ iSourceNeuron ] * weights[ iSourceNeuron ][ iDestNeuron ];
      }
   }
}
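The inner loop computes dest[j] = sum_i src[i] * weights[i][j], with weights stored as weights[source][dest]; a free-function sketch of the same loop over std::vector (illustrative, not part of the class):

#include <cassert>
#include <vector>

// Same weighted-sum propagation as above, as a free function.
void propagateForwardSketch( const std::vector<double>& src,
                             std::vector<double>& dest,
                             const std::vector< std::vector<double> >& weights )
{
   assert( weights.size() == src.size() );
   for ( std::size_t d = 0; d < dest.size(); ++d )
   {
      dest[d] = 0.0;
      for ( std::size_t s = 0; s < src.size(); ++s )
      {
         dest[d] += src[s] * weights[s][d];   // weights[source][dest]
      }
   }
}
// e.g. src = {1, 2}, weights = {{0.5}, {0.25}}  =>  dest = {1.0}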
Example #27
 /**
  * Computes the trace of the product of two kernel operators, i.e. of
  * \f$ tr( X_1 Y_1 D_1 Y_1^\dagger X_1^\dagger  X_2 Y_2 D_2 Y_2^\dagger X_2^\dagger ) \f$
  *
  */
 static Scalar traceProduct(const FSpaceCPtr &fs,
                            
                            const FMatrixCPtr &mX1, 
                            const ScalarAltMatrix  &mY1,
                            const RealVector &mD1,
                            
                            const FMatrixCPtr &mX2, 
                            const ScalarAltMatrix  &mY2,
                            const RealVector &mD2) {
     
     ScalarMatrix m = fs->k(mX1, mY1, mX2, mY2);
     
     return (m.adjoint() * mD1.asDiagonal() * m * mD2.asDiagonal()).trace();
 }
Example #28
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// isEqual
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool isEqual( const RealVector& x, const RealVector& y )
{
   if ( x.size() != y.size() )
   {
      return false;
   }
   for ( size_t i = 0; i < x.size(); ++i )
   {
      if ( x[ i ] != y[ i ] )
      {
         return false;
      }
   }
   return true;
}
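The comparison above is deliberately exact; when a tolerance matters, a variant along these lines could be used (a sketch in the same style, not part of the original code):

#include <cmath>

// Approximate comparison; 'tolerance' is a caller-chosen bound on each component.
bool isApproxEqual( const RealVector& x, const RealVector& y, double tolerance )
{
   if ( x.size() != y.size() )
   {
      return false;
   }
   for ( size_t i = 0; i < x.size(); ++i )
   {
      if ( std::fabs( x[ i ] - y[ i ] ) > tolerance )
      {
         return false;
      }
   }
   return true;
}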
Example #29
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// devPlotExport
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void DevGui::devPlotExport()
{
   RealVector x = Utils::createRangeReal( -2, 2, 100 );
   RealVector y( x.size() );
   for ( size_t i = 0; i < x.size(); ++i )
   {
      y[ i ] = x[ i ] * x[ i ];
   }

   Plotting::Plot2D& plot = gPlotFactory().createPlot( "NewPlot" );
   gPlotFactory().createGraph( x, y );

   plot.setXAxisTitle( "This is the x-axis" );
   plot.setYAxisTitle( "This is the y-axis" );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// propagateBackward
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void MultiLayerPerceptron::propagateBackward( const RealVector& sourceLayer, RealVector& destLayer, const std::vector< RealVector >& weights )
{
   /// Note: weights are indexed as weights[dest][source] here, the transpose of propagateForward.
   assert( weights.size() == destLayer.size() );
   for ( size_t iDestNeuron = 0; iDestNeuron < destLayer.size(); ++iDestNeuron )
   {
      destLayer[ iDestNeuron ] = 0;
      size_t sourceSize = m_useBiasNodes ? sourceLayer.size() - 1 : sourceLayer.size();

      assert( weights[ iDestNeuron ].size() == sourceSize );
      for ( size_t iSourceNeuron = 0; iSourceNeuron < sourceSize; ++iSourceNeuron )
      {
         destLayer[ iDestNeuron ] += sourceLayer[ iSourceNeuron ] * weights[ iDestNeuron ][ iSourceNeuron ];
      }
   }
}