Example #1
GMMExpectationMaximization::uint GMMExpectationMaximization::execute(const MatrixX & dataset)
{  
  const uint data_count = dataset.rows();
  const uint num_gaussians = m_means.size();
  const uint dim = dataset.cols();

  MatrixX pxi(data_count,num_gaussians);
  MatrixX pix(data_count,num_gaussians);
  VectorX pxidatatot(data_count);
  VectorX weights(num_gaussians);
  VectorX ex(data_count);
  MatrixX ts(dim,dim);
  VectorX dif(dim);

  Real prev_log_likelihood = 1.0;
  
  uint it_num;
  for (it_num = 0; it_num < m_max_iterations; it_num++)
  {
    // snapshot the current mixture weights
    for (uint g = 0; g < num_gaussians; g++)
      weights[g] = m_weights[g];

    // E-step: evaluate each gaussian's density at every sample
    for (uint d = 0; d < data_count; d++)
      for (uint g = 0; g < num_gaussians; g++)
        pxi(d,g) = gauss(m_means[g],m_covs[g],dataset.row(d).transpose());

    // average log-likelihood of the dataset under the current mixture
    pxidatatot = pxi * weights;
    Real log_likelihood = pxidatatot.array().log().sum() / Real(data_count);

    // stop when the relative change in log-likelihood drops below the threshold
    if (it_num != 0 && (std::abs(log_likelihood / prev_log_likelihood - 1.0) < m_termination_threshold))
      break;
    prev_log_likelihood = log_likelihood;

    // compute responsibilities p(gaussian g | sample d)
    for (uint d = 0; d < data_count; d++)
      pix.row(d) = (pxi.row(d).transpose().array() * weights.array()).transpose() / pxidatatot[d];

    // effective number of samples assigned to each gaussian
    ex = pix.colwise().sum();

    // M-step: re-estimate weight, mean and covariance of each gaussian
    for(uint g = 0; g < num_gaussians; g++)
    {
      m_weights[g] = ex[g] / Real(data_count);

      m_means[g] = (dataset.transpose() * pix.col(g)) / ex[g];

      ts = MatrixX::Zero(dim,dim);
      for (uint d = 0; d < data_count; d++)
      {
        dif = dataset.row(d).transpose() - m_means[g];
        ts.noalias() += (dif * dif.transpose()) * pix(d,g);
      }
      // regularize the diagonal with epsilon to keep the covariance invertible
      m_covs[g] = (ts / ex[g]) + MatrixX::Identity(dim,dim) * m_epsilon;
    }

    // interruption point here
    if (m_termination_handler && m_termination_handler->isTerminated())
      return it_num;
  }

  return it_num;
}
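The gauss() helper is not shown in this listing; presumably it evaluates the multivariate normal density N(x | mean, cov). A minimal self-contained Eigen sketch of such a density, with gaussianDensity as a hypothetical name rather than part of the class:

#include <Eigen/Dense>
#include <cmath>

// Hypothetical stand-in for the gauss() helper used by execute() above:
// evaluates N(x | mean, cov) through a Cholesky factorization of cov.
double gaussianDensity(const Eigen::VectorXd & mean,
                       const Eigen::MatrixXd & cov,
                       const Eigen::VectorXd & x)
{
  const double PI = 3.141592653589793;
  const double dim = double(x.size());
  Eigen::LLT<Eigen::MatrixXd> llt(cov);
  Eigen::MatrixXd L = llt.matrixL();               // cov = L * L^T
  Eigen::VectorXd dif = x - mean;
  // squared Mahalanobis distance via a triangular solve
  double maha = L.triangularView<Eigen::Lower>().solve(dif).squaredNorm();
  double sqrt_det = L.diagonal().prod();           // sqrt(det(cov))
  return std::exp(-0.5 * maha) / (std::pow(2.0 * PI, dim / 2.0) * sqrt_det);
}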
Example #2
/**
  * In this test we check that the gradient is correct by applying
  * the finite difference method.
  */
TYPED_TEST(TestSecondOrderMultinomialLogisticRegression, Gradient) {
    // Gradient checking should only be done in double precision
    if (is_float<TypeParam>::value) {
        return;
    }

    // eta is typically of size KxC, where K is the number of topics and C the
    // number of different classes. Here we arbitrarily choose K=10 and C=5.
    MatrixX<TypeParam> eta = MatrixX<TypeParam>::Random(10, 5);
    // X is of size KxD, where D is the total number of documents.
    // In this test a single document is used, so D=1.
    MatrixX<TypeParam> X = MatrixX<TypeParam>::Random(10, 1);
    // y is a vector of size Dx1 holding each document's class label
    VectorXi y(1);
    for (int i=0; i<1; i++) {
        y(i) = rand() % 5;
    }
    std::vector<MatrixX<TypeParam> > X_var = {MatrixX<TypeParam>::Random(10, 10).array().abs()};

    TypeParam L = 1;
    SecondOrderLogisticRegressionApproximation<TypeParam> mlr(X, X_var, y, L);

    // grad is the gradient according to the equation implemented in the
    // gradient() function of MultinomialLogisticRegression.cpp;
    // it has the same size as eta, namely KxC
    MatrixX<TypeParam> grad(10, 5);

    // Calculate the gradients
    mlr.gradient(eta, grad);

    // Finite-difference approximation of the gradient; t is the step size
    TypeParam grad_hat;
    TypeParam t = 1e-6;

    for (int i=0; i < eta.rows(); i++) {
        for (int j=0; j < eta.cols(); j++) {
            eta(i, j) += t;
            TypeParam ll1 = mlr.value(eta);
            eta(i, j) -= 2*t;
            TypeParam ll2 = mlr.value(eta);
            eta(i, j) += t;  // restore eta(i, j) to its original value

            // Central finite-difference approximation of the gradient
            grad_hat = (ll1 - ll2) / (2 * t);

            auto absolute_error = std::abs(grad(i, j) - grad_hat);
            if (grad_hat != 0) {
                auto relative_error = absolute_error / std::abs(grad_hat);
                EXPECT_TRUE(
                    relative_error < 1e-4 ||
                    absolute_error < 1e-5
                ) << relative_error << " " << absolute_error;
            }
            else {
                EXPECT_LT(absolute_error, 1e-5);
            }
        }
    }
}
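The loop above is the standard central-difference check: each entry of eta is perturbed by plus and minus t, and the slope (ll1 - ll2) / (2t) is compared against the analytic gradient. The same idea in isolation, for any scalar function f of a matrix (numericalGradient is an illustrative helper, not part of the test suite):

#include <Eigen/Dense>
#include <functional>

// Central-difference approximation of the gradient of f at eta.
Eigen::MatrixXd numericalGradient(
    const std::function<double(const Eigen::MatrixXd &)> & f,
    Eigen::MatrixXd eta, double t = 1e-6)
{
  Eigen::MatrixXd grad_hat(eta.rows(), eta.cols());
  for (int i = 0; i < eta.rows(); i++) {
    for (int j = 0; j < eta.cols(); j++) {
      eta(i, j) += t;
      double ll1 = f(eta);
      eta(i, j) -= 2 * t;
      double ll2 = f(eta);
      eta(i, j) += t;  // restore the original entry
      grad_hat(i, j) = (ll1 - ll2) / (2 * t);
    }
  }
  return grad_hat;
}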
Example #3
bool BCCoreSiconos::callSolver(MatrixX& Mlcp, VectorX& b, VectorX& solution, VectorX& contactIndexToMu, ofstream& os)
{
#ifdef BUILD_BCPLUGIN_WITH_SICONOS
  int NC3 = Mlcp.rows();   // 3 * number of contacts
  if(NC3<=0) return true;  // nothing to solve
  int NC = NC3/3;
  int CFS_DEBUG = 0;
  int CFS_DEBUG_VERBOSE = 0;
  if(CFS_DEBUG)
  {
    if(NC3 % 3 != 0)          { os << "   warning-1 " << std::endl; return false; }
    if(b.rows() != NC3)       { os << "   warning-2 " << std::endl; return false; }
    if(solution.rows() != NC3){ os << "   warning-3 " << std::endl; return false; }
  }
  // b stores the NC normal components first (indices 0..NC-1), followed by the
  // tangential pairs; Siconos expects contact-major triples in prob->q.
  for(int ia = 0; ia < NC; ia++)
    for(int i = 0; i < 3; i++)
      prob->q[3*ia+i] = b((i==0) ? ia : (2*ia+i+NC-1));
  for(int ia = 0; ia < NC; ia++)
    prob->mu[ia] = contactIndexToMu[ia];
  prob->numberOfContacts = NC;

  if( USE_FULL_MATRIX )
  {
    // dense storage: fill matrix0 row-major, applying the same
    // permutation to the rows and columns of Mlcp
    prob->M->storageType = 0;
    prob->M->size0       = NC3;
    prob->M->size1       = NC3;
    double* ptmp = prob->M->matrix0;
    for(int ia = 0; ia < NC; ia++) for(int i = 0; i < 3; i++)
    {
      for(int ja = 0; ja < NC; ja++) for(int j = 0; j < 3; j++)
      {
        ptmp[NC3*(3*ia+i) + (3*ja+j)] =
          Mlcp((i==0) ? ia : (2*ia+i+NC-1), (j==0) ? ja : (2*ja+j+NC-1));
      }
    }
  }
  else
  {
    // sparse storage: convert Mlcp to Siconos' sparse block format
    prob->M->storageType = 1;
    prob->M->size0       = NC3;
    prob->M->size1       = NC3;
    sparsify_A( prob->M->matrix1 , Mlcp , NC , &os);
  }
  
  // invoke the Siconos 3D frictional-contact driver
  fc3d_driver(prob,reaction,velocity,solops, numops);
  
  // copy the contact reactions back into solution, undoing the permutation
  double* prea = reaction;
  for(int ia=0;ia<NC;ia++)for(int i=0;i<3;i++) solution(((i==0)?(ia):(2*ia+i+NC-1))) = prea[3*ia+i];
  if(CFS_DEBUG_VERBOSE)
  {
    os << "=---------------------------------="<< std::endl; 
    os << "| res_error =" << solops->dparam[1] <<  std::endl;
    os << "=---------------------------------="<< std::endl; 
  }
#endif
  return true;
}
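The expression (i==0) ? ia : (2*ia+i+NC-1) that appears throughout maps between the two component orderings: Mlcp, b and solution keep all NC normal components first and the 2*NC tangential ones after, while Siconos works with contact-major triples [n, t1, t2]. A small sketch, with permutedIndex as an illustrative helper, that checks the mapping is a bijection onto [0, 3*NC):

#include <cassert>
#include <vector>

static int permutedIndex(int ia, int i, int NC)
{
  // ia: contact index, i: component (0 = normal, 1..2 = tangential)
  return (i == 0) ? ia : (2*ia + i + NC - 1);
}

static void checkPermutation(int NC)
{
  std::vector<bool> seen(3*NC, false);
  for (int ia = 0; ia < NC; ia++)
    for (int i = 0; i < 3; i++)
    {
      int k = permutedIndex(ia, i, NC);
      assert(k >= 0 && k < 3*NC && !seen[k]);  // in range and not used twice
      seen[k] = true;
    }
}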
Example #4
GMMExpectationMaximization::Real GMMExpectationMaximization::getBIC(const MatrixX & dataset) const
{
  const uint dim = dataset.cols();
  const uint num_gaussians = m_means.size();

  // free parameters: G*d(d+1)/2 covariance terms, G*d mean terms and G-1 independent weights
  Real number_of_parameters = (num_gaussians * dim * (dim + 1) / 2) + num_gaussians * dim + num_gaussians - 1;

  uint data_count = dataset.rows();
  Real sum = 0.0;
  
  // total log-likelihood of the dataset under the current mixture
  for(uint i = 0; i < data_count; i++)
    sum += log(expectation(dataset.row(i).transpose()));

  // penalized likelihood: the complexity term grows with the parameter count
  return -sum + (number_of_parameters / 2.0) * log(Real(data_count));
}
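Note that the returned value is half of the usual BIC = k*ln(n) - 2*ln(L); the constant factor does not affect model ranking. The parameter count adds G*d(d+1)/2 covariance entries, G*d mean entries and G-1 free weights: for instance, with G = 3 gaussians in d = 2 dimensions that is 9 + 6 + 2 = 17 free parameters.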
Example #5
const MatrixX& Jacobian::GetNullspace()
{
	if(computeNullSpace_)
	{
		computeNullSpace_ = false;
		// I - sum_i v_i v_i^T projects onto the nullspace of the jacobian,
		// where the v_i are the right singular vectors from the SVD
		MatrixX id = MatrixX::Identity(jacobian_.cols(), jacobian_.cols());
		ComputeSVD();
		MatrixX res = MatrixX::Zero(id.rows(), id.cols());
		for(int i = 0; i < svd_.matrixV().cols(); ++i)
		{
			VectorX v = svd_.matrixV().col(i);
			res += v * v.transpose();
		}
		Identitymin_ = id - res;
	}
	return Identitymin_;
}
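With the thin SVD J = U * S * V^T, the columns of V span the row space of J, so the accumulated I - V * V^T is a projector onto the nullspace of J. The same computation in compact form (nullspaceProjector is an illustrative helper; like the loop above, it keeps every singular vector, so a rank-deficient J would additionally need thresholding of small singular values):

#include <Eigen/Dense>

Eigen::MatrixXd nullspaceProjector(const Eigen::MatrixXd & J)
{
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(J, Eigen::ComputeThinV);
  const Eigen::MatrixXd & V = svd.matrixV();  // one column per singular value
  return Eigen::MatrixXd::Identity(J.cols(), J.cols()) - V * V.transpose();
}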
Example #6
bool GaussianSet::setSpinDensityMatrix(const MatrixX &m)
{
  // store a copy of the supplied spin density matrix
  m_spinDensity.resize(m.rows(), m.cols());
  m_spinDensity = m;
  return true;
}
Example #7
void GMMExpectationMaximization::autoInitializeByEqualIntervals(uint num_gaussians,uint col,const MatrixX & dataset)
{
  uint data_count = dataset.rows();
  uint dim = dataset.cols();

  std::vector<std::vector<uint> > index(num_gaussians);
  for(uint g = 0; g < num_gaussians; g++)
    index[g].reserve(data_count / num_gaussians);

  // reset all mixture parameters
  m_weights.clear();
  m_weights.resize(num_gaussians);
  m_means.clear();
  m_means.resize(num_gaussians,VectorX::Zero(dim));
  m_covs.clear();
  m_covs.resize(num_gaussians,MatrixX::Zero(dim,dim));

  // find max and min value for column col
  Real cmax = dataset.col(col).maxCoeff();
  Real cmin = dataset.col(col).minCoeff();
  Real cspan = cmax - cmin;

  for(uint n = 0; n < data_count; n++)
  {
    // bin the point by its value in column col; dividing by (cspan + 1.0)
    // keeps the fraction below 1, so gi is always < num_gaussians
    uint gi = uint((dataset(n,col) - cmin) / (cspan + 1.0) * Real(num_gaussians));

    // accumulate the points; they are averaged into means below
    m_means[gi] += dataset.row(n).transpose();

    index[gi].push_back(n);
  }

  for (uint g = 0; g < num_gaussians; g++)
  {
    uint popsize = index[g].size();
    // avoid division by zero: if the interval is empty, fall back to an
    // arbitrary sample as mean and the identity as covariance
    if (popsize == 0)
    {
      m_means[g] = dataset.row(g % data_count).transpose();
      m_covs[g] = MatrixX::Identity(dim,dim);
      m_weights[g] = 1.0 / Real(num_gaussians);
      continue;
    }

    // average by popsize
    m_means[g] /= Real(popsize);
    // same weight for all gaussians
    m_weights[g] = 1.0 / Real(num_gaussians);

    // compute the covariance matrix of the samples in this interval
    for (uint p = 0; p < popsize; p++)
    {
      const VectorX r = dataset.row(index[g][p]).transpose();
      const VectorX & m = m_means[g];
      m_covs[g] += (r - m) * (r - m).transpose();
    }

    m_covs[g] /= Real(popsize);
    // regularize the diagonal so the covariance stays invertible
    m_covs[g] += MatrixX::Identity(dim,dim) * m_epsilon;
  }
}
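The binning rule maps cmin to interval 0 and cmax to interval num_gaussians - 1, since dividing by (cspan + 1.0) keeps the fraction strictly below 1. One side effect worth knowing: if the data span is small compared to 1.0, most points land in the first interval, which is presumably why the empty-interval fallback above exists. A tiny standalone check (intervalIndex is an illustrative helper, with hypothetical values):

#include <cassert>

unsigned intervalIndex(double v, double cmin, double cspan, unsigned num_gaussians)
{
  return unsigned((v - cmin) / (cspan + 1.0) * double(num_gaussians));
}

int main()
{
  const double cmin = 2.0, cmax = 6.0, cspan = cmax - cmin;  // hypothetical range
  assert(intervalIndex(cmin, cmin, cspan, 4) == 0);  // minimum -> first interval
  assert(intervalIndex(4.0,  cmin, cspan, 4) == 1);  // (2/5)*4 = 1.6 -> 1
  assert(intervalIndex(cmax, cmin, cspan, 4) == 3);  // (4/5)*4 = 3.2 -> last interval
  return 0;
}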