ScalarType average_neighbor_distance(const DenseMatrix& data, const Neighbors& neighbors)
{
	IndexType k = neighbors[0].size();
	ScalarType average_distance = 0;

	for (IndexType i = 0; i < data.cols(); ++i)
	{
		for (IndexType j = 0; j < k; ++j)
		{
			average_distance += (data.col(i) - data.col(neighbors[i][j])).norm();
		}
	}
	return average_distance / (k * data.cols());
}
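
A minimal usage sketch for the helper above; the typedefs are assumptions mirroring what the snippet appears to use (in a real build they come from the Tapkee headers), and the data values are made up.

#include <Eigen/Dense>
#include <vector>
#include <iostream>

typedef double ScalarType;
typedef int IndexType;
typedef Eigen::MatrixXd DenseMatrix;                 // points stored column-wise
typedef std::vector<IndexType> LocalNeighbors;       // neighbor indices of one point
typedef std::vector<LocalNeighbors> Neighbors;       // neighbor lists for all points

int main()
{
	// Three 2-d points forming a right triangle; each point lists the other two as neighbors.
	DenseMatrix data(2, 3);
	data << 0, 1, 0,
	        0, 0, 1;
	Neighbors neighbors = { {1, 2}, {0, 2}, {0, 1} };
	// Six point-to-neighbor distances: four of length 1 and two of length sqrt(2),
	// so the average is (4 + 2*sqrt(2)) / 6.
	std::cout << average_neighbor_distance(data, neighbors) << std::endl;
	return 0;
}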
SparseMatrixNeighborsPair angles_matrix_and_neighbors(const Neighbors& neighbors,
                                                      const DenseMatrix& data)
{
	const IndexType k = neighbors[0].size();
	const IndexType n_vectors = data.cols();

	SparseTriplets sparse_triplets;
	sparse_triplets.reserve(k * n_vectors);
	/* I tried to find better naming, but... */
	Neighbors most_collinear_neighbors_of_neighbors;
	most_collinear_neighbors_of_neighbors.reserve(n_vectors);

	for (IndexType i = 0; i < n_vectors; ++i)
	{
		const LocalNeighbors& current_neighbors = neighbors[i];
		LocalNeighbors most_collinear_current_neighbors;
		most_collinear_current_neighbors.reserve(k);

		for (IndexType j = 0; j < k; ++j)
		{
			const LocalNeighbors& neighbors_of_neighbor = neighbors[current_neighbors[j]];
			/* The closer the cosine is to -1.0, the closer the angle is to 180 degrees */
			ScalarType min_cos_value = 1.0, current_cos_value;
			/* This value will be updated during the search for the most collinear neighbor */
			most_collinear_current_neighbors.push_back(0);

			for (IndexType l = 0; l < k; ++l)
			{
				DenseVector neighbor_to_point = data.col(i) - data.col(current_neighbors[j]);
				DenseVector neighbor_to_its_neighbor = data.col(neighbors_of_neighbor[l])
														- data.col(current_neighbors[j]);
				current_cos_value = neighbor_to_point.dot(neighbor_to_its_neighbor) /
									(neighbor_to_point.norm() *
									 neighbor_to_its_neighbor.norm());
				if (current_cos_value < min_cos_value)
				{
					most_collinear_current_neighbors[j] = neighbors_of_neighbor[l];
					min_cos_value = current_cos_value;
				}
			}

			SparseTriplet triplet(i, most_collinear_current_neighbors[j], min_cos_value);
			sparse_triplets.push_back(triplet);
		}
		most_collinear_neighbors_of_neighbors.push_back(most_collinear_current_neighbors);
	}
	return SparseMatrixNeighborsPair
		(sparse_matrix_from_triplets(sparse_triplets, n_vectors, n_vectors),
		 most_collinear_neighbors_of_neighbors);
}
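
The collinearity test above can be checked in isolation: for three points on one line with the middle point as the vertex, the cosine is exactly -1. A small sketch with made-up coordinates:

#include <Eigen/Dense>
#include <iostream>

int main()
{
	// p, q, r lie on one line with q in the middle, so the angle p-q-r is 180 degrees.
	Eigen::Vector2d p(0, 0), q(1, 0), r(2, 0);
	Eigen::Vector2d q_to_p = p - q;
	Eigen::Vector2d q_to_r = r - q;
	double cos_value = q_to_p.dot(q_to_r) / (q_to_p.norm() * q_to_r.norm());
	std::cout << cos_value << std::endl;  // -1: r is the most collinear continuation of p through q
	return 0;
}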
Example #3
DenseMatrix project(RandomAccessIterator begin, RandomAccessIterator end, FeatureVectorCallback callback,
		IndexType dimension, const IndexType max_iter, const ScalarType epsilon,
		const IndexType target_dimension, const DenseVector& mean_vector)
{
	timed_context context("Data projection");

	// The number of data points
	const IndexType n = end-begin;

	// Dense representation of the data points

	DenseVector current_vector(dimension);

	DenseMatrix X = DenseMatrix::Zero(dimension,n);

	for (RandomAccessIterator iter=begin; iter!=end; ++iter)
	{
		callback.vector(*iter,current_vector);
		X.col(iter-begin) = current_vector - mean_vector;
	}

	// Initialize FA model

	// Initial variances
	DenseMatrix sig = DenseMatrix::Identity(dimension,dimension);
	// Initial linear mapping
	DenseMatrix A = DenseMatrix::Random(dimension, target_dimension).cwiseAbs();

	// Main loop
	IndexType iter = 0;
	DenseMatrix invC,M,SC;
	ScalarType ll = 0, newll = 0;
	while (iter < max_iter)
	{
		++iter;

		// Perform E-step

		// Compute the inverse of the covariance matrix
		invC = (A*A.transpose() + sig).inverse();
		M = A.transpose()*invC*X;
		SC = n*(DenseMatrix::Identity(target_dimension,target_dimension) - A.transpose()*invC*A) + M*M.transpose();

		// Perform M-step
		A = (X*M.transpose())*SC.inverse();
		sig = DenseMatrix(((X*X.transpose() - A*M*X.transpose()).diagonal()/n).asDiagonal()).array() + epsilon;

		// Compute log-likelihood of FA model
		newll = 0.5*(log(invC.determinant()) - (invC*X).cwiseProduct(X).sum()/n);

		// Check for convergence
		if ((iter > 1) && (fabs(newll - ll) < epsilon))
			break;

		ll = newll;
	}

	return X.transpose()*A;
}
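
The E-step above relies on the factor analysis identities cov(x) = A*A^T + sig and E[z|x] = A^T*(A*A^T + sig)^(-1)*x. A small sketch with made-up numbers, showing that this posterior mean recovers the latent factor when the noise is tiny:

#include <Eigen/Dense>
#include <iostream>

int main()
{
	Eigen::Vector3d A(1, 2, 3);                                // loading vector, target_dimension = 1
	double z = 2.0;                                            // latent factor value
	Eigen::Vector3d x = A * z;                                 // noiseless observation
	Eigen::Matrix3d sig = 1e-6 * Eigen::Matrix3d::Identity();  // tiny noise variances
	Eigen::Matrix3d invC = (A * A.transpose() + sig).inverse();
	double posterior_mean = A.dot(invC * x);                   // A^T * invC * x
	std::cout << posterior_mean << std::endl;                  // close to 2
	return 0;
}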
Example #4
	EmbeddingResult embed(const MatrixType& wm, IndexType target_dimension, unsigned int skip)
	{
		timed_context context("Randomized eigendecomposition");
		
		DenseMatrix O(wm.rows(), target_dimension+skip);
		for (IndexType i=0; i<O.rows(); ++i)
		{
			IndexType j=0;
			for ( ; j+1 < O.cols(); j+= 2)
			{
				ScalarType v1 = (ScalarType)(rand()+1.f)/((float)RAND_MAX+2.f);
				ScalarType v2 = (ScalarType)(rand()+1.f)/((float)RAND_MAX+2.f);
				ScalarType len = sqrt(-2.f*log(v1));
				O(i,j) = len*cos(2.f*M_PI*v2);
				O(i,j+1) = len*sin(2.f*M_PI*v2);
			}
			for ( ; j < O.cols(); j++)
			{
				ScalarType v1 = (ScalarType)(rand()+1.f)/((float)RAND_MAX+2.f);
				ScalarType v2 = (ScalarType)(rand()+1.f)/((float)RAND_MAX+2.f);
				ScalarType len = sqrt(-2.f*log(v1));
				O(i,j) = len*cos(2.f*M_PI*v2);
			}
		}
		MatrixTypeOperation operation(wm);

		DenseMatrix Y = operation(O);
		for (IndexType i=0; i<Y.cols(); i++)
		{
			for (IndexType j=0; j<i; j++)
			{
				ScalarType r = Y.col(i).dot(Y.col(j));
				Y.col(i) -= r*Y.col(j);
			}
			ScalarType norm = Y.col(i).norm();
			if (norm < 1e-4)
			{
				for (int k = i; k<Y.cols(); k++)
					Y.col(k).setZero();
			}
			Y.col(i) *= (1.f / norm);
		}

		DenseMatrix B1 = operation(Y);
		DenseMatrix B = Y.householderQr().solve(B1);
		DenseSelfAdjointEigenSolver eigenOfB(B);

		if (eigenOfB.info() == Eigen::Success)
		{
			DenseMatrix embedding = (Y*eigenOfB.eigenvectors()).block(0, skip, wm.cols(), target_dimension);
			return EmbeddingResult(embedding,eigenOfB.eigenvalues());
		}
		else
		{
			throw eigendecomposition_error("eigendecomposition failed");
		}
		return EmbeddingResult();
	}
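
The nested loops above draw standard normal entries for O via the Box-Muller transform; a stand-alone sketch of the same sampling step:

#include <cstdlib>
#include <cmath>
#include <iostream>

int main()
{
	// Two uniforms in (0,1), shifted away from 0 so that log(v1) stays finite.
	double v1 = (std::rand() + 1.0) / ((double)RAND_MAX + 2.0);
	double v2 = (std::rand() + 1.0) / ((double)RAND_MAX + 2.0);
	double len = std::sqrt(-2.0 * std::log(v1));
	// Box-Muller: the two values below are independent N(0,1) samples.
	double g1 = len * std::cos(2.0 * M_PI * v2);
	double g2 = len * std::sin(2.0 * M_PI * v2);
	std::cout << g1 << " " << g2 << std::endl;
	return 0;
}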
Example #5
EigendecompositionResult eigendecomposition_impl_randomized(const MatrixType& wm, IndexType target_dimension, unsigned int skip)
{
	timed_context context("Randomized eigendecomposition");
	
	DenseMatrix O(wm.rows(), target_dimension+skip);
	for (IndexType i=0; i<O.rows(); ++i)
	{
		for (IndexType j=0; j<O.cols(); j++)
		{
			O(i,j) = tapkee::gaussian_random();
		}
	}
	MatrixOperationType operation(wm);

	DenseMatrix Y = operation(O);
	for (IndexType i=0; i<Y.cols(); i++)
	{
		for (IndexType j=0; j<i; j++)
		{
			ScalarType r = Y.col(i).dot(Y.col(j));
			Y.col(i) -= r*Y.col(j);
		}
		ScalarType norm = Y.col(i).norm();
		if (norm < 1e-4)
		{
			for (int k = i; k<Y.cols(); k++)
				Y.col(k).setZero();
		}
		Y.col(i) *= (1.f / norm);
	}

	DenseMatrix B1 = operation(Y);
	DenseMatrix B = Y.householderQr().solve(B1);
	DenseSelfAdjointEigenSolver eigenOfB(B);

	if (eigenOfB.info() == Eigen::Success)
	{
		if (MatrixOperationType::largest)
		{
			assert(skip==0);
			DenseMatrix selected_eigenvectors = (Y*eigenOfB.eigenvectors()).rightCols(target_dimension);
			return EigendecompositionResult(selected_eigenvectors,eigenOfB.eigenvalues());
		} 
		else
		{
			DenseMatrix selected_eigenvectors = (Y*eigenOfB.eigenvectors()).leftCols(target_dimension+skip).rightCols(target_dimension);
			return EigendecompositionResult(selected_eigenvectors,eigenOfB.eigenvalues());
		}
	}
	else
	{
		throw eigendecomposition_error("eigendecomposition failed");
	}
	return EigendecompositionResult();
}
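
The column selection above relies on Eigen's SelfAdjointEigenSolver returning eigenvalues in increasing order: rightCols() takes the largest ones, while leftCols(target_dimension+skip).rightCols(target_dimension) discards the `skip` smallest and keeps the next target_dimension. A toy sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
	Eigen::Matrix3d S;
	S << 2, 0, 0,
	     0, 5, 0,
	     0, 0, 1;
	Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> solver(S);
	std::cout << solver.eigenvalues().transpose() << std::endl;                            // 1 2 5
	std::cout << solver.eigenvectors().rightCols(1).transpose() << std::endl;              // eigenvector of 5
	std::cout << solver.eigenvectors().leftCols(2).rightCols(1).transpose() << std::endl;  // eigenvector of 2 (skip = 1)
	return 0;
}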
ScalarType compute_error_for_point(const IndexType index, const DenseMatrix& data,
                                   const DataForErrorFunc& error_func_data)
{
	IndexType k = error_func_data.distance_neighbors[0].size();
	ScalarType error_value = 0;
	for (IndexType i = 0; i < k; ++i)
	{
		/* Find new angle */
		IndexType neighbor = error_func_data.distance_neighbors[index][i];
		IndexType neighbor_of_neighbor = error_func_data.angle_neighbors[index][i];
		/* TODO: Extract into a small helper that finds the angle between three points */
		DenseVector neighbor_to_point = data.col(index) - data.col(neighbor);
		DenseVector neighbor_to_its_neighbor = data.col(neighbor_of_neighbor)
												- data.col(neighbor);
		ScalarType current_cos_value = neighbor_to_point.dot(neighbor_to_its_neighbor) /
									(neighbor_to_point.norm() *
									 neighbor_to_its_neighbor.norm());
		/* Find new distance */
		ScalarType current_distance = (data.col(index) - data.col(neighbor)).norm();
		/* Compute one component of the error function's value */
		ScalarType diff_cos =
			current_cos_value - error_func_data.angles_matrix.coeff(index, neighbor_of_neighbor);
		if (diff_cos < 0)
			diff_cos = 0;
		ScalarType diff_distance =
			current_distance - error_func_data.distance_matrix.coeff(index, neighbor);
		diff_distance /= error_func_data.average_distance;
		/* Weight for adjusted point should be bigger than 1, according to the
		 * original algorithm
		 */
		ScalarType weight =
			(error_func_data.adjusted_points.count(neighbor) == 0) ? 1 : weight_for_adjusted_point;
		error_value += weight * (diff_cos * diff_cos + diff_distance * diff_distance);
	}
	return error_value;
}
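
A small helper of the kind the TODO above asks for; this is only a sketch and the name is made up:

#include <Eigen/Dense>

// Hypothetical helper: cosine of the angle at `vertex` formed by the segments
// vertex -> a and vertex -> b (the pattern repeated in the snippets above).
double cos_angle_at(const Eigen::VectorXd& vertex,
                    const Eigen::VectorXd& a,
                    const Eigen::VectorXd& b)
{
	Eigen::VectorXd to_a = a - vertex;
	Eigen::VectorXd to_b = b - vertex;
	return to_a.dot(to_b) / (to_a.norm() * to_b.norm());
}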
template<typename SparseMatrixType> void sparse_block(const SparseMatrixType& ref)
{
  const Index rows = ref.rows();
  const Index cols = ref.cols();
  const Index inner = ref.innerSize();
  const Index outer = ref.outerSize();

  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;

  double density = (std::max)(8./(rows*cols), 0.01);
  typedef Matrix<Scalar,Dynamic,Dynamic,SparseMatrixType::IsRowMajor?RowMajor:ColMajor> DenseMatrix;
  typedef Matrix<Scalar,Dynamic,1> DenseVector;
  typedef Matrix<Scalar,1,Dynamic> RowDenseVector;
  typedef SparseVector<Scalar> SparseVectorType;

  Scalar s1 = internal::random<Scalar>();
  {
    SparseMatrixType m(rows, cols);
    DenseMatrix refMat = DenseMatrix::Zero(rows, cols);
    initSparse<Scalar>(density, refMat, m);

    VERIFY_IS_APPROX(m, refMat);

    // test InnerIterators and Block expressions
    for (int t=0; t<10; ++t)
    {
      Index j = internal::random<Index>(0,cols-2);
      Index i = internal::random<Index>(0,rows-2);
      Index w = internal::random<Index>(1,cols-j);
      Index h = internal::random<Index>(1,rows-i);

      VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w));
      for(Index c=0; c<w; c++)
      {
        VERIFY_IS_APPROX(m.block(i,j,h,w).col(c), refMat.block(i,j,h,w).col(c));
        for(Index r=0; r<h; r++)
        {
          VERIFY_IS_APPROX(m.block(i,j,h,w).col(c).coeff(r), refMat.block(i,j,h,w).col(c).coeff(r));
          VERIFY_IS_APPROX(m.block(i,j,h,w).coeff(r,c), refMat.block(i,j,h,w).coeff(r,c));
        }
      }
      for(Index r=0; r<h; r++)
      {
        VERIFY_IS_APPROX(m.block(i,j,h,w).row(r), refMat.block(i,j,h,w).row(r));
        for(Index c=0; c<w; c++)
        {
          VERIFY_IS_APPROX(m.block(i,j,h,w).row(r).coeff(c), refMat.block(i,j,h,w).row(r).coeff(c));
          VERIFY_IS_APPROX(m.block(i,j,h,w).coeff(r,c), refMat.block(i,j,h,w).coeff(r,c));
        }
      }
      
      VERIFY_IS_APPROX(m.middleCols(j,w), refMat.middleCols(j,w));
      VERIFY_IS_APPROX(m.middleRows(i,h), refMat.middleRows(i,h));
      for(Index r=0; r<h; r++)
      {
        VERIFY_IS_APPROX(m.middleCols(j,w).row(r), refMat.middleCols(j,w).row(r));
        VERIFY_IS_APPROX(m.middleRows(i,h).row(r), refMat.middleRows(i,h).row(r));
        for(Index c=0; c<w; c++)
        {
          VERIFY_IS_APPROX(m.col(c).coeff(r), refMat.col(c).coeff(r));
          VERIFY_IS_APPROX(m.row(r).coeff(c), refMat.row(r).coeff(c));
          
          VERIFY_IS_APPROX(m.middleCols(j,w).coeff(r,c), refMat.middleCols(j,w).coeff(r,c));
          VERIFY_IS_APPROX(m.middleRows(i,h).coeff(r,c), refMat.middleRows(i,h).coeff(r,c));
          if(m.middleCols(j,w).coeff(r,c) != Scalar(0))
          {
            VERIFY_IS_APPROX(m.middleCols(j,w).coeffRef(r,c), refMat.middleCols(j,w).coeff(r,c));
          }
          if(m.middleRows(i,h).coeff(r,c) != Scalar(0))
          {
            VERIFY_IS_APPROX(m.middleRows(i,h).coeff(r,c), refMat.middleRows(i,h).coeff(r,c));
          }
        }
      }
      for(Index c=0; c<w; c++)
      {
        VERIFY_IS_APPROX(m.middleCols(j,w).col(c), refMat.middleCols(j,w).col(c));
        VERIFY_IS_APPROX(m.middleRows(i,h).col(c), refMat.middleRows(i,h).col(c));
      }
    }

    for(Index c=0; c<cols; c++)
    {
      VERIFY_IS_APPROX(m.col(c) + m.col(c), (m + m).col(c));
      VERIFY_IS_APPROX(m.col(c) + m.col(c), refMat.col(c) + refMat.col(c));
    }

    for(Index r=0; r<rows; r++)
    {
      VERIFY_IS_APPROX(m.row(r) + m.row(r), (m + m).row(r));
      VERIFY_IS_APPROX(m.row(r) + m.row(r), refMat.row(r) + refMat.row(r));
    }
  }

  // test innerVector()
  {
    DenseMatrix refMat2 = DenseMatrix::Zero(rows, cols);
    SparseMatrixType m2(rows, cols);
    initSparse<Scalar>(density, refMat2, m2);
    Index j0 = internal::random<Index>(0,outer-1);
    Index j1 = internal::random<Index>(0,outer-1);
    Index r0 = internal::random<Index>(0,rows-1);
    Index c0 = internal::random<Index>(0,cols-1);

    VERIFY_IS_APPROX(m2.innerVector(j0), innervec(refMat2,j0));
    VERIFY_IS_APPROX(m2.innerVector(j0)+m2.innerVector(j1), innervec(refMat2,j0)+innervec(refMat2,j1));

    m2.innerVector(j0) *= Scalar(2);
    innervec(refMat2,j0) *= Scalar(2);
    VERIFY_IS_APPROX(m2, refMat2);

    m2.row(r0) *= Scalar(3);
    refMat2.row(r0) *= Scalar(3);
    VERIFY_IS_APPROX(m2, refMat2);

    m2.col(c0) *= Scalar(4);
    refMat2.col(c0) *= Scalar(4);
    VERIFY_IS_APPROX(m2, refMat2);

    m2.row(r0) /= Scalar(3);
    refMat2.row(r0) /= Scalar(3);
    VERIFY_IS_APPROX(m2, refMat2);

    m2.col(c0) /= Scalar(4);
    refMat2.col(c0) /= Scalar(4);
    VERIFY_IS_APPROX(m2, refMat2);

    SparseVectorType v1;
    VERIFY_IS_APPROX(v1 = m2.col(c0) * 4, refMat2.col(c0)*4);
    VERIFY_IS_APPROX(v1 = m2.row(r0) * 4, refMat2.row(r0).transpose()*4);

    SparseMatrixType m3(rows,cols);
    m3.reserve(VectorXi::Constant(outer,int(inner/2)));
    for(Index j=0; j<outer; ++j)
      for(Index k=0; k<(std::min)(j,inner); ++k)
        m3.insertByOuterInner(j,k) = internal::convert_index<StorageIndex>(k+1);
    for(Index j=0; j<(std::min)(outer, inner); ++j)
    {
      VERIFY(j==numext::real(m3.innerVector(j).nonZeros()));
      if(j>0)
        VERIFY(j==numext::real(m3.innerVector(j).lastCoeff()));
    }
    m3.makeCompressed();
    for(Index j=0; j<(std::min)(outer, inner); ++j)
    {
      VERIFY(j==numext::real(m3.innerVector(j).nonZeros()));
      if(j>0)
        VERIFY(j==numext::real(m3.innerVector(j).lastCoeff()));
    }

    VERIFY(m3.innerVector(j0).nonZeros() == m3.transpose().innerVector(j0).nonZeros());

//     m2.innerVector(j0) = 2*m2.innerVector(j1);
//     refMat2.col(j0) = 2*refMat2.col(j1);
//     VERIFY_IS_APPROX(m2, refMat2);
  }

  // test innerVectors()
  {
    DenseMatrix refMat2 = DenseMatrix::Zero(rows, cols);
    SparseMatrixType m2(rows, cols);
    initSparse<Scalar>(density, refMat2, m2);
    if(internal::random<float>(0,1)>0.5f) m2.makeCompressed();
    Index j0 = internal::random<Index>(0,outer-2);
    Index j1 = internal::random<Index>(0,outer-2);
    Index n0 = internal::random<Index>(1,outer-(std::max)(j0,j1));
    if(SparseMatrixType::IsRowMajor)
      VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(j0,0,n0,cols));
    else
      VERIFY_IS_APPROX(m2.innerVectors(j0,n0), refMat2.block(0,j0,rows,n0));
    if(SparseMatrixType::IsRowMajor)
      VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),
                       refMat2.middleRows(j0,n0)+refMat2.middleRows(j1,n0));
    else
      VERIFY_IS_APPROX(m2.innerVectors(j0,n0)+m2.innerVectors(j1,n0),
                      refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0));
    
    VERIFY_IS_APPROX(m2, refMat2);
    
    VERIFY(m2.innerVectors(j0,n0).nonZeros() == m2.transpose().innerVectors(j0,n0).nonZeros());
    
    m2.innerVectors(j0,n0) = m2.innerVectors(j0,n0) + m2.innerVectors(j1,n0);
    if(SparseMatrixType::IsRowMajor)
      refMat2.middleRows(j0,n0) = (refMat2.middleRows(j0,n0) + refMat2.middleRows(j1,n0)).eval();
    else
      refMat2.middleCols(j0,n0) = (refMat2.middleCols(j0,n0) + refMat2.middleCols(j1,n0)).eval();
    
    VERIFY_IS_APPROX(m2, refMat2);
  }

  // test generic blocks
  {
    DenseMatrix refMat2 = DenseMatrix::Zero(rows, cols);
    SparseMatrixType m2(rows, cols);
    initSparse<Scalar>(density, refMat2, m2);
    Index j0 = internal::random<Index>(0,outer-2);
    Index j1 = internal::random<Index>(0,outer-2);
    Index n0 = internal::random<Index>(1,outer-(std::max)(j0,j1));
    if(SparseMatrixType::IsRowMajor)
      VERIFY_IS_APPROX(m2.block(j0,0,n0,cols), refMat2.block(j0,0,n0,cols));
    else
      VERIFY_IS_APPROX(m2.block(0,j0,rows,n0), refMat2.block(0,j0,rows,n0));
    
    if(SparseMatrixType::IsRowMajor)
      VERIFY_IS_APPROX(m2.block(j0,0,n0,cols)+m2.block(j1,0,n0,cols),
                      refMat2.block(j0,0,n0,cols)+refMat2.block(j1,0,n0,cols));
    else
      VERIFY_IS_APPROX(m2.block(0,j0,rows,n0)+m2.block(0,j1,rows,n0),
                      refMat2.block(0,j0,rows,n0)+refMat2.block(0,j1,rows,n0));
      
    Index i = internal::random<Index>(0,m2.outerSize()-1);
    if(SparseMatrixType::IsRowMajor) {
      m2.innerVector(i) = m2.innerVector(i) * s1;
      refMat2.row(i) = refMat2.row(i) * s1;
      VERIFY_IS_APPROX(m2,refMat2);
    } else {
      m2.innerVector(i) = m2.innerVector(i) * s1;
      refMat2.col(i) = refMat2.col(i) * s1;
      VERIFY_IS_APPROX(m2,refMat2);
    }
    
    Index r0 = internal::random<Index>(0,rows-2);
    Index c0 = internal::random<Index>(0,cols-2);
    Index r1 = internal::random<Index>(1,rows-r0);
    Index c1 = internal::random<Index>(1,cols-c0);
    
    VERIFY_IS_APPROX(DenseVector(m2.col(c0)), refMat2.col(c0));
    VERIFY_IS_APPROX(m2.col(c0), refMat2.col(c0));
    
    VERIFY_IS_APPROX(RowDenseVector(m2.row(r0)), refMat2.row(r0));
    VERIFY_IS_APPROX(m2.row(r0), refMat2.row(r0));

    VERIFY_IS_APPROX(m2.block(r0,c0,r1,c1), refMat2.block(r0,c0,r1,c1));
    VERIFY_IS_APPROX((2*m2).block(r0,c0,r1,c1), (2*refMat2).block(r0,c0,r1,c1));

    if(m2.nonZeros()>0)
    {
      VERIFY_IS_APPROX(m2, refMat2);
      SparseMatrixType m3(rows, cols);
      DenseMatrix refMat3(rows, cols); refMat3.setZero();
      Index n = internal::random<Index>(1,10);
      for(Index k=0; k<n; ++k)
      {
        Index o1 = internal::random<Index>(0,outer-1);
        Index o2 = internal::random<Index>(0,outer-1);
        if(SparseMatrixType::IsRowMajor)
        {
          m3.innerVector(o1) = m2.row(o2);
          refMat3.row(o1) = refMat2.row(o2);
        }
        else
        {
          m3.innerVector(o1) = m2.col(o2);
          refMat3.col(o1) = refMat2.col(o2);
        }
        if(internal::random<bool>())
          m3.makeCompressed();
      }
      if(m3.nonZeros()>0)
        VERIFY_IS_APPROX(m3, refMat3);
    }
  }
}
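
innerVector(i) in the test above follows the storage order of the sparse matrix: it is column i for column-major storage and row i for row-major storage. A minimal sketch:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
	Eigen::SparseMatrix<double> col_major(3, 3);                    // ColMajor by default
	Eigen::SparseMatrix<double, Eigen::RowMajor> row_major(3, 3);
	col_major.insert(1, 2) = 5.0;
	row_major.insert(1, 2) = 5.0;
	std::cout << col_major.innerVector(2).nonZeros() << std::endl;  // 1: column 2 holds the entry
	std::cout << row_major.innerVector(1).nonZeros() << std::endl;  // 1: row 1 holds the entry
	return 0;
}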
Example #8
EmbeddingResult spe_embedding(RandomAccessIterator begin, RandomAccessIterator end,
		PairwiseCallback callback, const Neighbors& neighbors,
		unsigned int target_dimension, bool global_strategy,
		DefaultScalarType tolerance, int nupdates)
{
	timed_context context("SPE embedding computation");
	unsigned int k = 0;
	if (!global_strategy)
		k = neighbors[0].size();

	// The number of data points
	int N = end-begin;
	while (nupdates > N/2)
		nupdates = N/2;

	// Look for the maximum distance
	DefaultScalarType max = 0.0;
	for (RandomAccessIterator i_iter=begin; i_iter!=end; ++i_iter)
	{
		for (RandomAccessIterator j_iter=i_iter+1; j_iter!=end; ++j_iter)
		{
			max = std::max(max, callback(*i_iter,*j_iter));
		}
	}

	// Distance normalizer used in the global strategy
	DefaultScalarType alpha = 0.0;
	if (global_strategy)
		alpha = 1.0 / max * std::sqrt(2.0);

	// Random embedding initialization; Y is short for the embedding feature matrix
	std::srand(std::time(0));
	DenseMatrix Y = (DenseMatrix::Random(target_dimension,N)
		       + DenseMatrix::Ones(target_dimension,N)) / 2;
	// Auxiliary difference embedding feature matrix
	DenseMatrix Yd(target_dimension,nupdates);

	// SPE's main loop
	
	typedef std::vector<int> Indices;
	typedef std::vector<int>::iterator IndexIterator;

	// Maximum number of iterations
	int max_iter = 2000 + round(0.04 * N*N);
	if (!global_strategy)
		max_iter *= 3;
	// Learning parameter
	DefaultScalarType lambda = 1.0;
	// Vector of indices used for shuffling
	Indices indices(N);
	for (int i=0; i<N; ++i)
		indices[i] = i;
	// Vector with distances in the original space of the points to update
	DenseVector Rt(nupdates);
	DenseVector scale(nupdates);
	DenseVector D(nupdates);
	// Iterators to the indices of the elements to update
	IndexIterator ind1;
	IndexIterator ind2;
	// Helper used in local strategy
	Indices ind1Neighbors;
	if (!global_strategy)
		ind1Neighbors.resize(k*nupdates);

	for (int i=0; i<max_iter; ++i)
	{
		// Shuffle to select the vectors to update in this iteration
		std::random_shuffle(indices.begin(),indices.end());

		ind1 = indices.begin();
		ind2 = indices.begin()+nupdates;

		// With the local strategy, the second set of indices is selected among
		// neighbors of the first set
		if (!global_strategy)
		{
			// Neighbors of interest
			for(int j=0; j<nupdates; ++j)
			{
				const LocalNeighbors& current_neighbors =
					neighbors[*ind1++];

				for(unsigned int kk=0; kk<k; ++kk)
					ind1Neighbors[kk + j*k] = current_neighbors[kk];
			}
			// Restore ind1
			ind1 = indices.begin();

			// Generate pseudo-random indices and select final indices
			for(int j=0; j<nupdates; ++j)
			{
				unsigned int r = round( std::rand()*1.0/RAND_MAX*(k-1) ) + k*j;
				indices[nupdates+j] = ind1Neighbors[r];
			}
		}


		// Compute distances between the selected points in the embedded space
		for(int j=0; j<nupdates; ++j)
		{
			//FIXME it seems that here Euclidean distance is forced
			D[j] = (Y.col(*ind1) - Y.col(*ind2)).norm();
			++ind1, ++ind2;
		}

		// Get the corresponding distances in the original space
		if (global_strategy)
			Rt.fill(alpha);
		else // local_strategy
			Rt.fill(1);

		ind1 = indices.begin();
		ind2 = indices.begin()+nupdates;
		for (int j=0; j<nupdates; ++j)
			Rt[j] *= callback(*(begin + *ind1++), *(begin + *ind2++));

		// Compute some terms for update

		// Scale factor
		D.array() += tolerance;
		scale = (Rt-D).cwiseQuotient(D);

		ind1 = indices.begin();
		ind2 = indices.begin()+nupdates;
		// Difference matrix
		for (int j=0; j<nupdates; ++j)
		{
			Yd.col(j).noalias() = Y.col(*ind1) - Y.col(*ind2);

			++ind1, ++ind2;
		}

		ind1 = indices.begin();
		ind2 = indices.begin()+nupdates;
		// Update the location of the vectors in the embedded space
		for (int j=0; j<nupdates; ++j)
		{
			Y.col(*ind1) += lambda / 2 * scale[j] * Yd.col(j);
			Y.col(*ind2) -= lambda / 2 * scale[j] * Yd.col(j);

			++ind1, ++ind2;
		}

		// Update the learning parameter
		lambda = lambda - ( lambda / max_iter );
	}

	return EmbeddingResult(Y.transpose(),DenseVector());
}
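
The per-pair update inside the main loop can be written out for a single pair of embedded points (a sketch with made-up values): each point moves along their difference by lambda/2 * (r - d)/(d + tolerance), where d is the embedded distance and r the original-space distance.

#include <Eigen/Dense>
#include <iostream>

int main()
{
	Eigen::Vector2d y_i(0.0, 0.0), y_j(1.0, 0.0);  // made-up embedded positions
	double r = 2.0;                                // distance in the original space
	double lambda = 1.0, tolerance = 1e-9;

	double d = (y_i - y_j).norm() + tolerance;
	double scale = (r - d) / d;                    // positive: the pair is too close and gets pushed apart
	Eigen::Vector2d diff = y_i - y_j;
	y_i += lambda / 2 * scale * diff;
	y_j -= lambda / 2 * scale * diff;
	std::cout << (y_i - y_j).norm() << std::endl;  // 2 (up to the tolerance), matching r
	return 0;
}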
Example #9
 static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) {
   int r  = internal::random(0,m2.rows()-1);
   int c1 = internal::random(0,m2.cols()-1);
   VERIFY_IS_APPROX(m4=m2.row(r).transpose()*refMat2.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat2.col(c1).transpose());
   VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.row(r), refMat4=refMat2.col(c1)*refMat2.row(r));
 }
Example #10
SparseWeightMatrix kltsa_weight_matrix(RandomAccessIterator begin, RandomAccessIterator end, 
                                       const Neighbors& neighbors, PairwiseCallback callback, 
                                       unsigned int target_dimension, DefaultScalarType shift,
                                       bool partial_eigendecomposer=false)
{
	timed_context context("KLTSA weight matrix computation");
	const unsigned int k = neighbors[0].size();

	SparseTriplets sparse_triplets;
	sparse_triplets.reserve((k*k+k+1)*(end-begin));

	RandomAccessIterator iter;
	RandomAccessIterator iter_begin = begin, iter_end = end;
	DenseMatrix gram_matrix = DenseMatrix::Zero(k,k);
	DenseVector col_means(k), row_means(k);
	DenseVector rhs = DenseVector::Ones(k);
	DenseMatrix G = DenseMatrix::Zero(k,target_dimension+1);
	G.col(0).setConstant(1/sqrt(DefaultScalarType(k)));
	DefaultDenseSelfAdjointEigenSolver solver;

	RESTRICT_ALLOC;
//#pragma omp parallel for private(iter,gram_matrix,G)
	for (iter=iter_begin; iter<iter_end; ++iter)
	{
		const LocalNeighbors& current_neighbors = neighbors[iter-begin];
	
		for (unsigned int i=0; i<k; ++i)
		{
			for (unsigned int j=i; j<k; ++j)
			{
				DefaultScalarType kij = callback(begin[current_neighbors[i]],begin[current_neighbors[j]]);
				gram_matrix(i,j) = kij;
				gram_matrix(j,i) = kij;
			}
		}
		
		col_means = gram_matrix.colwise().mean();
		DefaultScalarType grand_mean = gram_matrix.mean();
		gram_matrix.array() += grand_mean;
		gram_matrix.rowwise() -= col_means.transpose();
		gram_matrix.colwise() -= col_means;

		UNRESTRICT_ALLOC;
		if (partial_eigendecomposer)
		{
			G.rightCols(target_dimension).noalias() = eigen_embedding<DenseMatrix,DenseMatrixOperation>(ARPACK,gram_matrix,target_dimension,0).first;
		}
		else
		{
			solver.compute(gram_matrix);
			G.rightCols(target_dimension).noalias() = solver.eigenvectors().rightCols(target_dimension);
		}
		RESTRICT_ALLOC;
		gram_matrix.noalias() = G * G.transpose();
		
		sparse_triplets.push_back(SparseTriplet(iter-begin,iter-begin,shift));
		for (unsigned int i=0; i<k; ++i)
		{
			sparse_triplets.push_back(SparseTriplet(current_neighbors[i],current_neighbors[i],1.0));
			for (unsigned int j=0; j<k; ++j)
				sparse_triplets.push_back(SparseTriplet(current_neighbors[i],current_neighbors[j],
				                                        -gram_matrix(i,j)));
		}
	}
	UNRESTRICT_ALLOC;

	SparseWeightMatrix weight_matrix(end-begin,end-begin);
	weight_matrix.setFromTriplets(sparse_triplets.begin(),sparse_triplets.end());

	return weight_matrix;
}
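
The triplet accumulation above works because setFromTriplets() sums entries that share the same position, so each neighborhood can push its own contribution independently; a minimal sketch:

#include <Eigen/Sparse>
#include <vector>
#include <iostream>

int main()
{
	typedef Eigen::Triplet<double> T;
	std::vector<T> triplets;
	triplets.push_back(T(0, 0, 1.0));
	triplets.push_back(T(0, 0, 0.5));   // same position: contributions accumulate
	triplets.push_back(T(1, 2, -2.0));

	Eigen::SparseMatrix<double> W(3, 3);
	W.setFromTriplets(triplets.begin(), triplets.end());
	std::cout << W.coeff(0, 0) << std::endl;  // 1.5
	std::cout << W.coeff(1, 2) << std::endl;  // -2
	return 0;
}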
Example #11
SparseWeightMatrix hlle_weight_matrix(RandomAccessIterator begin, RandomAccessIterator end, 
                                      const Neighbors& neighbors, PairwiseCallback callback, unsigned int target_dimension)
{
	timed_context context("KLTSA weight matrix computation");
	const unsigned int k = neighbors[0].size();

	SparseTriplets sparse_triplets;
	sparse_triplets.reserve(k*k*(end-begin));

	RandomAccessIterator iter_begin = begin, iter_end = end;
	DenseMatrix gram_matrix = DenseMatrix::Zero(k,k);
	DenseVector col_means(k), row_means(k);
	DenseVector rhs = DenseVector::Ones(k);
	DenseMatrix G = DenseMatrix::Zero(k,target_dimension+1);

	RandomAccessIterator iter;
	for (iter=iter_begin; iter!=iter_end; ++iter)
	{
		const LocalNeighbors& current_neighbors = neighbors[iter-begin];
	
		for (unsigned int i=0; i<k; ++i)
		{
			for (unsigned int j=i; j<k; ++j)
			{
				DefaultScalarType kij = callback(begin[current_neighbors[i]],begin[current_neighbors[j]]);
				gram_matrix(i,j) = kij;
				gram_matrix(j,i) = kij;
			}
		}
		
		for (unsigned int i=0; i<k; ++i)
		{
			col_means[i] = gram_matrix.col(i).mean();
			row_means[i] = gram_matrix.row(i).mean();
		}
		DefaultScalarType grand_mean = gram_matrix.mean();
		gram_matrix.array() += grand_mean;
		gram_matrix.rowwise() -= col_means.transpose();
		gram_matrix.colwise() -= row_means;
		
		DefaultDenseSelfAdjointEigenSolver sae_solver;
		sae_solver.compute(gram_matrix);

		G.col(0).setConstant(1/sqrt(DefaultScalarType(k)));
		G.rightCols(target_dimension).noalias() = sae_solver.eigenvectors().rightCols(target_dimension);
		gram_matrix = G * G.transpose();
		
		sparse_triplets.push_back(SparseTriplet(iter-begin,iter-begin,1e-8));
		for (unsigned int i=0; i<k; ++i)
		{
			sparse_triplets.push_back(SparseTriplet(current_neighbors[i],current_neighbors[i],1.0));
			for (unsigned int j=0; j<k; ++j)
				sparse_triplets.push_back(SparseTriplet(current_neighbors[i],current_neighbors[j],
				                                        -gram_matrix(i,j)));
		}
	}

	SparseWeightMatrix weight_matrix(end-begin,end-begin);
	weight_matrix.setFromTriplets(sparse_triplets.begin(),sparse_triplets.end());

	return weight_matrix;
}
Example #12
SparseWeightMatrix tangent_weight_matrix(RandomAccessIterator begin, RandomAccessIterator end,
                                         const Neighbors& neighbors, PairwiseCallback callback,
                                         const IndexType target_dimension, const ScalarType shift,
                                         const bool partial_eigendecomposer=false)
{
	timed_context context("KLTSA weight matrix computation");
	const IndexType k = neighbors[0].size();

	SparseTriplets sparse_triplets;
	sparse_triplets.reserve((k*k+2*k+1)*(end-begin));

#pragma omp parallel shared(begin,end,neighbors,callback,sparse_triplets) default(none)
	{
		IndexType index_iter;
		DenseMatrix gram_matrix = DenseMatrix::Zero(k,k);
		DenseVector rhs = DenseVector::Ones(k);
		DenseMatrix G = DenseMatrix::Zero(k,target_dimension+1);
		G.col(0).setConstant(1/sqrt(static_cast<ScalarType>(k)));
		DenseSelfAdjointEigenSolver solver;
		SparseTriplets local_triplets;
		local_triplets.reserve(k*k+2*k+1);

#pragma omp for nowait
		for (index_iter=0; index_iter<static_cast<IndexType>(end-begin); index_iter++)
		{
			const LocalNeighbors& current_neighbors = neighbors[index_iter];

			for (IndexType i=0; i<k; ++i)
			{
				for (IndexType j=i; j<k; ++j)
				{
					ScalarType kij = callback.kernel(begin[current_neighbors[i]],begin[current_neighbors[j]]);
					gram_matrix(i,j) = kij;
					gram_matrix(j,i) = kij;
				}
			}

			centerMatrix(gram_matrix);

			//UNRESTRICT_ALLOC;
#ifdef TAPKEE_WITH_ARPACK
			if (partial_eigendecomposer)
			{
				G.rightCols(target_dimension).noalias() =
					eigendecomposition<DenseMatrix,DenseMatrixOperation>(Arpack,gram_matrix,target_dimension,0).first;
			}
			else
#endif
			{
				solver.compute(gram_matrix);
				G.rightCols(target_dimension).noalias() = solver.eigenvectors().rightCols(target_dimension);
			}
			//RESTRICT_ALLOC;
			gram_matrix.noalias() = G * G.transpose();

			SparseTriplet diagonal_triplet(index_iter,index_iter,shift);
			local_triplets.push_back(diagonal_triplet);
			for (IndexType i=0; i<k; ++i)
			{
				SparseTriplet neighborhood_diagonal_triplet(current_neighbors[i],current_neighbors[i],1.0);
				local_triplets.push_back(neighborhood_diagonal_triplet);

				for (IndexType j=0; j<k; ++j)
				{
					SparseTriplet tangent_triplet(current_neighbors[i],current_neighbors[j],-gram_matrix(i,j));
					local_triplets.push_back(tangent_triplet);
				}
			}
#pragma omp critical
			{
				copy(local_triplets.begin(),local_triplets.end(),back_inserter(sparse_triplets));
			}

			local_triplets.clear();
		}
	}

	return sparse_matrix_from_triplets(sparse_triplets, end-begin, end-begin);
}
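
centerMatrix() is not shown here; the earlier examples inline the same steps, so it presumably performs the double centering sketched below (a guess under that assumption, not the library's own definition):

#include <Eigen/Dense>

// Double-center a symmetric Gram matrix so that its rows and columns have zero mean:
// K(i,j) <- K(i,j) - colmean(j) - rowmean(i) + grand mean.
void center_gram_matrix(Eigen::MatrixXd& gram_matrix)
{
	Eigen::RowVectorXd col_means = gram_matrix.colwise().mean();
	double grand_mean = gram_matrix.mean();
	gram_matrix.array() += grand_mean;
	gram_matrix.rowwise() -= col_means;
	gram_matrix.colwise() -= col_means.transpose();
}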