SparseMatrix NeighborhoodBuilder::buildMatrix_(std::vector<T>& entries, const DenseMatrix& mat) const
	{
		// Make sure that there are no duplicate entries
		std::sort(entries.begin(), entries.begin() + k_ * mat.rows(), triple_less);
		auto new_end = std::unique(entries.begin(), entries.begin() + k_ * mat.rows(), triple_equal);

		// An iterator for inserting the transposed entries
		// This will also serve as the new end pointer of the
		// entries array.
		auto new_it = new_end;

		// Symmetrize the matrix
		for(auto old_it = entries.begin(); old_it != new_end; ++old_it, ++new_it) {
			*new_it = T(old_it->col(), old_it->row(), old_it->value());
		}

		// Build the temporary matrix for the results
		SparseMatrix result(mat.rowNames(), mat.rowNames());

		// Fill the matrix and convert to CCS
		// new_it points to the end of valid matrix entries.
		result.matrix().setFromTriplets(entries.begin(), new_it);
		result.matrix().makeCompressed();

		return result;
	}
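The assembly above leans on Eigen's triplet interface: setFromTriplets sums duplicate (row, col) pairs, which is why the entries are deduplicated first, and makeCompressed converts the result to compressed column storage. A minimal stand-alone sketch of that pattern with plain Eigen types (the wrapper classes used above do not appear here):

#include <Eigen/Sparse>
#include <vector>

// Build a small sparse matrix from (row, col, value) triplets.
Eigen::SparseMatrix<double> fromTriplets()
{
	std::vector<Eigen::Triplet<double>> triplets;
	triplets.emplace_back(0, 1, 0.5);
	triplets.emplace_back(1, 0, 0.5); // transposed entry inserted by hand, as in buildMatrix_
	triplets.emplace_back(2, 2, 1.0);

	Eigen::SparseMatrix<double> m(3, 3);
	m.setFromTriplets(triplets.begin(), triplets.end()); // duplicate entries would be summed
	m.makeCompressed();                                  // switch to compressed (CCS) storage
	return m;
}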
Example #2
DenseMatrix<FloatType> DenseMatrix<FloatType>::subtract(const DenseMatrix<FloatType> &A, const DenseMatrix<FloatType> &B)
{
	MLIB_ASSERT_STR(A.rows() == B.rows() && A.cols() == B.cols(), "invalid matrix dimensions");

	const UINT rows = A.m_rows;
	const UINT cols = A.m_cols;

	DenseMatrix<FloatType> result(A.m_rows, A.m_cols);
	for(UINT row = 0; row < rows; row++)
		for(UINT col = 0; col < cols; col++)
			result.m_dataPtr[row * cols + col] = A.m_dataPtr[row * cols + col] - B.m_dataPtr[row * cols + col];
	return result;
}
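The loop writes through the raw data pointer; row * cols + col is the usual row-major mapping from a 2-D index into flat storage. The same idea on plain arrays, free of the mlib types (a sketch, not part of the library):

#include <cstddef>

// Element-wise A - B for rows x cols matrices stored row-major in flat arrays.
void subtractRowMajor(const double* A, const double* B, double* result,
                      std::size_t rows, std::size_t cols)
{
	for (std::size_t row = 0; row < rows; ++row)
		for (std::size_t col = 0; col < cols; ++col)
			result[row * cols + col] = A[row * cols + col] - B[row * cols + col];
}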
	GPUDenseMatrixOperation(const DenseMatrix& matrix)
	{
		mat = viennacl::matrix<ScalarType>(matrix.cols(),matrix.rows());
		vec = viennacl::matrix<ScalarType>(matrix.cols(),1);
		res = viennacl::matrix<ScalarType>(matrix.cols(),1);
		viennacl::copy(matrix,mat);
	}
Example #4
DenseVector ConstraintBSpline::evalHessian(const DenseVector &x) const
{
    DenseVector xa = adjustToDomainBounds(x);
    DenseVector ddx = DenseVector::Zero(nnzHessian);

    // Get x-values
    DenseVector xx = xa.block(0,0,bspline.getNumVariables(),1);

    // Calculate Hessian
    DenseMatrix H = bspline.evalHessian(xx);

    // H is symmetric so fill out lower left triangle only
    int idx = 0;
    for (int row = 0; row < H.rows(); row++)
    {
        for (int col = 0; col <= row; col++)
        {
            //if (H(row,col) != 0)
            //{
                ddx(idx++) = H(row,col);
            //}
        }
    }

    return ddx;
}
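With the zero check disabled, the loop packs the lower triangle row by row, so entry (row, col) with col <= row lands at index row*(row+1)/2 + col and nnzHessian must equal n*(n+1)/2. A self-contained sketch of the same flattening with plain Eigen types (an illustration, not the solver's code):

#include <Eigen/Dense>

// Flatten the lower triangle of a symmetric matrix row by row.
Eigen::VectorXd flattenLowerTriangle(const Eigen::MatrixXd& H)
{
    const int n = static_cast<int>(H.rows());
    Eigen::VectorXd ddx = Eigen::VectorXd::Zero(n * (n + 1) / 2);
    int idx = 0;
    for (int row = 0; row < n; ++row)
        for (int col = 0; col <= row; ++col)
            ddx(idx++) = H(row, col); // (row, col) lands at row*(row+1)/2 + col
    return ddx;
}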
Example #5
DenseVector ConstraintBSpline::evalJacobian(const DenseVector &x) const
{
    DenseVector xa = adjustToDomainBounds(x);
    DenseVector dx = DenseVector::Zero(nnzJacobian);

    //return centralDifference(xa);

    // Get x-values
    DenseVector xx = xa.block(0,0,bspline.getNumVariables(),1);

    // Evaluate B-spline Jacobian
    DenseMatrix jac = bspline.evalJacobian(xx);

    // Derivatives on inputs x
    int k = 0;
    for (int i = 0; i < jac.rows(); i++)
    {
        for (int j = 0; j < jac.cols(); j++)
        {
            dx(k++) = jac(i,j);
        }
    }

    // Derivatives on outputs y
    for (unsigned int i = 0; i < numConstraints; i++)
    {
        dx(k++) = -1;
    }

    return dx;
}
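The -1 entries for the outputs suggest the constraint has the form f(x) - y = 0, so the full Jacobian is [df/dx, -I]; the loop stores df/dx row by row followed by one -1 per output. A compact sketch of that packing with plain Eigen types (the constraint form is an assumption, for illustration only):

#include <Eigen/Dense>

// Pack [df/dx row-major, then -1 per output] into a flat gradient vector.
Eigen::VectorXd packJacobian(const Eigen::MatrixXd& jac, int numOutputs)
{
    Eigen::VectorXd dx = Eigen::VectorXd::Zero(jac.size() + numOutputs);
    int k = 0;
    for (int i = 0; i < jac.rows(); ++i)
        for (int j = 0; j < jac.cols(); ++j)
            dx(k++) = jac(i, j);  // derivatives with respect to the inputs x
    for (int i = 0; i < numOutputs; ++i)
        dx(k++) = -1.0;           // derivative of f(x) - y with respect to each output y
    return dx;
}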
	GPUDenseImplicitSquareMatrixOperation(const DenseMatrix& matrix)
	{
		timed_context c("Storing matrices");
		mat = viennacl::matrix<ScalarType>(matrix.cols(),matrix.rows());
		vec = viennacl::matrix<ScalarType>(matrix.cols(),1);
		res = viennacl::matrix<ScalarType>(matrix.cols(),1);
		viennacl::copy(matrix,mat);
	}
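Both GPU wrappers follow the same pattern: allocate device-side buffers once and copy the operand matrix over, so later products only pay for the transfer a single time. A minimal sketch of that host-to-device staging with plain ViennaCL types, assuming the host matrix is a std::vector of rows (one of the CPU formats viennacl::copy accepts):

#include <vector>
#include <viennacl/matrix.hpp>

// Stage a host matrix on the compute device once so it can be reused.
viennacl::matrix<double> stageOnDevice(const std::vector<std::vector<double>>& host)
{
	viennacl::matrix<double> device(host.size(), host.front().size());
	viennacl::copy(host, device); // single host-to-device transfer
	return device;
}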
Example #7
void BSpline::setControlPoints(const DenseMatrix &controlPoints)
{
    if (controlPoints.cols() != numVariables + 1)
        throw Exception("BSpline::setControlPoints: Incompatible size of control point matrix.");

    int nc = controlPoints.rows();

    knotaverages = controlPoints.block(0, 0, nc, numVariables);
    coefficients = controlPoints.block(0, numVariables, nc, 1);

    checkControlPoints();
}
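The split uses Eigen's block(startRow, startCol, rows, cols): the first numVariables columns of the control point matrix hold the knot averages and the final column holds the coefficients. A stand-alone sketch of the same split with plain Eigen types (illustrative only):

#include <Eigen/Dense>
#include <utility>

// Split an nc x (n+1) control point matrix into knot averages (nc x n) and coefficients (nc x 1).
std::pair<Eigen::MatrixXd, Eigen::MatrixXd>
splitControlPoints(const Eigen::MatrixXd& controlPoints, int numVariables)
{
    const int nc = static_cast<int>(controlPoints.rows());
    Eigen::MatrixXd knotAverages = controlPoints.block(0, 0, nc, numVariables);
    Eigen::MatrixXd coefficients = controlPoints.block(0, numVariables, nc, 1);
    return {knotAverages, coefficients};
}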
Example #8
DenseMatrix<FloatType> DenseMatrix<FloatType>::multiply(const DenseMatrix<FloatType> &A, const DenseMatrix<FloatType> &B)
{
	MLIB_ASSERT_STR(A.cols() == B.rows(), "invalid dimensions");

	const UINT rows = A.rows();
	const UINT cols = B.cols();
	const UINT innerCount = A.cols();

	DenseMatrix<FloatType> result(rows, cols);
	
	for(UINT row = 0; row < rows; row++)
		for(UINT col = 0; col < cols; col++)
		{
			FloatType sum = 0.0;
			for(UINT inner = 0; inner < innerCount; inner++)
				sum += A(row, inner) * B(inner, col);
			result(row, col) = sum;
		}

	return result;
}
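The triple loop above is the textbook O(rows * cols * inner) product with the dot-product (i-j-k) ordering. For row-major storage, an i-k-j ordering keeps the innermost loop walking B and the result contiguously; a sketch of that variant on flat arrays (a common cache-friendliness tweak, not something the mlib code does):

#include <cstddef>

// result = A * B for row-major flat arrays: A is rows x inner, B is inner x cols.
void multiplyRowMajor(const double* A, const double* B, double* result,
                      std::size_t rows, std::size_t inner, std::size_t cols)
{
	for (std::size_t idx = 0; idx < rows * cols; ++idx)
		result[idx] = 0.0;
	for (std::size_t i = 0; i < rows; ++i)
		for (std::size_t k = 0; k < inner; ++k)
		{
			const double a = A[i * inner + k];
			for (std::size_t j = 0; j < cols; ++j)
				result[i * cols + j] += a * B[k * cols + j]; // contiguous access to B and result
		}
}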
	SparseMatrix NeighborhoodBuilder::build(DenseMatrix mat) const
	{
		/*
		 * This matrix contains the k_ best neighbors for every vertex.
		 * The candidates are sorted in ascending order.
		 *
		 * The matrix is updated for each entry as correlations are computed.
		 * This allows us to cut the computation time in half!
		 *
		 * We allocate twice the (worst-case) storage needed, as we have
		 * to symmetrize the matrix afterwards. Alternatively we could
		 * resize later, which might lead to a full copy.
		 */
		std::vector<T> entries(2 * k_ * mat.rows());

		std::vector<DenseMatrix::value_type> sd(mat.rows());

		DenseMatrix::Vector mu = mat.matrix().rowwise().mean();

		for(unsigned int i = 0; i < mat.rows(); ++i) {
			mat.row(i) = mat.row(i).array() - mu[i];
			sd[i] = mat.row(i).norm();
		}

		for(unsigned int i = 0; i < mat.rows(); ++i) {
			for(unsigned int j = i + 1; j < mat.rows(); ++j) {
				double cov = mat.row(i).dot(mat.row(j));
				cov = fabs(cov) / (sd[i] * sd[j]);

				// Insert into the entries vector
				insert_(T(i, j, cov), entries.begin() + i * k_);
				insert_(T(i, j, cov), entries.begin() + j * k_);
			}
		}

		return buildMatrix_(entries, mat);
	}
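After each row is centred by its mean, sd[i] is the norm of the centred row, so fabs(cov) / (sd[i] * sd[j]) is the absolute Pearson correlation between rows i and j. The same quantity for two rows in isolation, with plain Eigen types (a sketch for illustration):

#include <Eigen/Dense>
#include <cmath>

// Absolute Pearson correlation between two rows: centre by the mean, then
// normalize the dot product by the product of the centred norms.
double absPearson(Eigen::RowVectorXd a, Eigen::RowVectorXd b)
{
	a.array() -= a.mean();
	b.array() -= b.mean();
	return std::fabs(a.dot(b)) / (a.norm() * b.norm());
}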
Example #10
/*
 * Calculate coefficients of B-spline representing a multivariate polynomial
 *
 * The polynomial f(x), with x in R^n, has m terms on the form
 * f(x) = c(0)*x(0)^E(0,0)*x(1)^E(0,1)*...*x(n-1)^E(0,n-1)
 *       +c(1)*x(0)^E(1,0)*x(1)^E(1,1)*...*x(n-1)^E(1,n-1)
 *       +...
 *       +c(m-1)*x(0)^E(m-1,0)*x(1)^E(m-1,1)*...*x(n-1)^E(m-1,n-1)
 * where c in R^m is a vector with coefficients for each of the m terms,
 * and E in N^(mxn) is a matrix with the exponents of each variable in each of the m terms,
 * e.g. the first row of E defines the first term with variable exponents E(0,0) to E(0,n-1).
 *
 * Note: E must be a matrix of nonnegative integers
 */
DenseMatrix getBSplineBasisCoefficients(DenseVector c, DenseMatrix E, std::vector<double> lb, std::vector<double> ub)
{
    unsigned int dim = E.cols();
    unsigned int terms = E.rows();
    assert(dim >= 1); // At least one variable
    assert(terms >= 1); // At least one term (assumes that c is a column vector)
    assert(terms == c.rows());
    assert(dim == lb.size());
    assert(dim == ub.size());

    // Get highest power of each variable
    DenseVector powers = E.colwise().maxCoeff();

    // Store in std vector
    std::vector<unsigned int> powers2;
    for (unsigned int i = 0; i < powers.size(); ++i)
        powers2.push_back(powers(i));

    // Calculate tensor product transformation matrix T
    DenseMatrix T = getTransformationMatrix(powers2, lb, ub);

    // Compute power basis coefficients (lambda vector)
    SparseMatrix L(T.cols(),1);
    L.setZero();

    for (unsigned int i = 0; i < terms; i++)
    {
        SparseMatrix Li(1,1);
        Li.insert(0,0) = 1;

        for (unsigned int j = 0; j < dim; j++)
        {
            int e = E(i,j);
            SparseVector li(powers(j)+1);
            li.reserve(1);
            li.insert(e) = 1;

            SparseMatrix temp = Li;
            Li = kroneckerProduct(temp, li);
        }

        L += c(i)*Li;
    }

    // Compute B-spline coefficients
    DenseMatrix C = T*L;

    return C;
}
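Each term's selector Li is the Kronecker product of one-hot vectors, one per variable, with the 1 placed at that variable's exponent; summing c(i)*Li over all terms gives the power basis coefficients L. A small sketch of the per-term construction using Eigen's unsupported KroneckerProduct module, with dense vectors for brevity:

#include <Eigen/Dense>
#include <unsupported/Eigen/KroneckerProduct>

// One-hot selector for a single monomial term: exponent e_j and maximum power p_j per variable.
Eigen::VectorXd monomialSelector(const Eigen::VectorXi& exponents, const Eigen::VectorXi& maxPowers)
{
    Eigen::VectorXd Li = Eigen::VectorXd::Ones(1);
    for (Eigen::Index j = 0; j < exponents.size(); ++j)
    {
        Eigen::VectorXd lj = Eigen::VectorXd::Zero(maxPowers(j) + 1);
        lj(exponents(j)) = 1.0;                                  // the 1 sits at this variable's exponent
        Eigen::VectorXd next = Eigen::kroneckerProduct(Li, lj);  // length grows by a factor of p_j + 1
        Li = next;
    }
    return Li;
}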
Example #11
ConstraintQuadratic::ConstraintQuadratic(std::vector<VariablePtr> variables, DenseMatrix A, DenseMatrix b, double c, double lb, double ub)
    : Constraint(variables), A(A), b(b), c(c)
{
    assert(A.cols() == (int)variables.size());
    assert(A.rows() == b.rows());
    assert(b.cols() == 1);

    numConstraints = 1;

    this->lb.push_back(lb);
    this->ub.push_back(ub);

    jacobianCalculated = true;
    hessianCalculated = true;
    constraintLinear = false;
    constraintConvex = false;
    convexRelaxationAvailable = true;

    nnzJacobian = A.rows();
    nnzHessian = 0;

    constraintName = "Constraint Quadratic";

    //    // Check for parameters for NaN
    //    for (int i = 0; i < A.rows(); i++)
    //    {
    //        for (int j = 0; j < A.cols(); j++)
    //        {
    //            bool nanA = false;
    //            if (A(i,j) != A(i,j)) nanA = true;
    //            assert(nanA == false);
    //        }
    //        bool nanb = false;
    //        if (b(i) != b(i)) nanb = true;
    //        assert(nanb == false);
    //    }

    // Calculate and store Hessian
    H = A.transpose() + A;

    // H is symmetric so fill out lower left triangle only
    for (int row = 0; row < H.rows(); row++)
    {
        for (int col = 0; col <= row; col++)
        {
            if (H(row,col) != 0)
            {
                nnzHessian++;
            }
        }
    }

    // Check convexity using Hessian
    Eigen::EigenSolver<DenseMatrix> es(H);
    DenseVector eigs = es.eigenvalues().real();
    double minEigVal = 0;
    for (int i = 0; i < eigs.rows(); i++)
    {
        if (eigs(i) < minEigVal) minEigVal = eigs(i);
    }

    if (minEigVal >= 0 && lb <= -INF)
    {
        constraintConvex = true;
    }
    // Note could also check that max. eigen value <= 0 and ub = INF

    checkConstraintSanity();
}
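Since H = A' + A is symmetric by construction, the convexity test could equivalently use Eigen's SelfAdjointEigenSolver, which works entirely with real eigenvalues; a hedged alternative sketch of the positive-semidefiniteness check performed above:

#include <Eigen/Dense>

// True if the symmetric matrix H has no eigenvalue below -tol (i.e. is positive semidefinite).
bool isPositiveSemidefinite(const Eigen::MatrixXd& H, double tol = 0.0)
{
    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(H);
    return es.eigenvalues().minCoeff() >= -tol;
}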
void manifold_sculpting_embed(RandomAccessIterator begin, RandomAccessIterator end,
                              DenseMatrix& data, IndexType target_dimension,
                              const Neighbors& neighbors, DistanceCallback callback,
                              IndexType max_iteration, ScalarType squishing_rate)
{
	/* Step 1: Get initial distances to each neighbor and initial
	 * angles between the point Pi, each neighbor Nij, and the most
	 * collinear neighbor of Nij.
	 */
	ScalarType initial_average_distance;
	SparseMatrix distances_to_neighbors =
		neighbors_distances_matrix(begin, end, neighbors, callback, initial_average_distance);
	SparseMatrixNeighborsPair angles_and_neighbors =
		angles_matrix_and_neighbors(neighbors, data);

	/* Step 2: Optionally preprocess the data using PCA
	 * (skipped for now).
	 */
	ScalarType no_improvement_counter = 0, normal_counter = 0;
	ScalarType current_multiplier = squishing_rate;
	ScalarType learning_rate = initial_average_distance;
	ScalarType best_error = DBL_MAX, current_error, point_error;
	/* Step 3: Do until no improvement is made for some period
	 * (or until max_iteration number is reached):
	 */
	while (((no_improvement_counter++ < max_number_of_iterations_without_improvement)
			|| (current_multiplier >  multiplier_treshold))
			&& (normal_counter++ < max_iteration))
	{
		/* Step 3a: Scale the data in non-preserved dimensions
		 * by a factor of squishing_rate.
		 */
		data.bottomRows(data.rows() - target_dimension) *= squishing_rate;
		while (average_neighbor_distance(data, neighbors) < initial_average_distance)
		{
			data.topRows(target_dimension) /= squishing_rate;
		}
		current_multiplier *= squishing_rate;

		/* Step 3b: Restore the previously computed relationships
		 * (distances to neighbors and angles to ...) by adjusting
		 * data points in first target_dimension dimensions.
		 */
		/* Start adjusting from a random point */
		IndexType start_point_index = std::rand() % data.cols();
		std::deque<IndexType> points_to_adjust;
		points_to_adjust.push_back(start_point_index);
		ScalarType steps_made = 0;
		current_error = 0;
		std::set<IndexType> adjusted_points;

		while (!points_to_adjust.empty())
		{
			IndexType current_point_index = points_to_adjust.front();
			points_to_adjust.pop_front();
			if (adjusted_points.count(current_point_index) == 0)
			{
				DataForErrorFunc error_func_data = {
					distances_to_neighbors,
					angles_and_neighbors.first,
					neighbors,
					angles_and_neighbors.second,
					adjusted_points,
					initial_average_distance
				};
				adjust_point_at_index(current_point_index, data, target_dimension,
									learning_rate, error_func_data, point_error);
				current_error += point_error;
				/* Insert all neighbors into deque */
				std::copy(neighbors[current_point_index].begin(),
				          neighbors[current_point_index].end(),
				          std::back_inserter(points_to_adjust));
				adjusted_points.insert(current_point_index);
			}
		}

		if (steps_made > data.cols())
			learning_rate *= learning_rate_grow_factor;
		else
			learning_rate *= learning_rate_shrink_factor;

		if (current_error < best_error)
		{
			best_error = current_error;
			no_improvement_counter = 0;
		}
	}

	data.conservativeResize(target_dimension, Eigen::NoChange);
	data.transposeInPlace();
}
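Step 3a scales the dimensions that will be dropped by squishing_rate and then stretches the preserved dimensions until the average neighbor distance is restored. That scaling step in isolation, with plain Eigen types and the library's average_neighbor_distance helper abstracted behind a function pointer (a sketch, not the library code):

#include <Eigen/Dense>

// Squash the non-preserved rows, then stretch the preserved rows until the
// average neighbor distance is back at its original level.
void scaleStep(Eigen::MatrixXd& data, int target_dimension, double squishing_rate,
               double initial_average_distance,
               double (*averageNeighborDistance)(const Eigen::MatrixXd&))
{
	data.bottomRows(data.rows() - target_dimension) *= squishing_rate;
	while (averageNeighborDistance(data) < initial_average_distance)
		data.topRows(target_dimension) /= squishing_rate;
}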
Example #13
void EigenSolverVTK<FloatType>::eigenSystemInternal(const DenseMatrix<FloatType> &M, FloatType **eigenvectors, FloatType *eigenvalues) const
{
	MLIB_ASSERT_STR(M.isSymmetric(), "can only handle symmetric matrices");
    const unsigned int rows = M.rows();
    MLIB_ASSERT_STR(M.square() && M.rows() >= 2, "invalid matrix dimensions in EigenSolverVTK<T>::eigenSystem");
    int i, j, k, iq, ip, numPos, n = int(rows);
    FloatType tresh, theta, tau, t, sm, s, h, g, c, tmp;
    FloatType bspace[4], zspace[4];
    FloatType *b = bspace;
    FloatType *z = zspace;

    //
    // Jacobi iteration destroys the matrix so create a temporary copy
    //
    DenseMatrix<FloatType> a = M;
    
    //
    // only allocate memory if the matrix is large
    //
    if (n > 4)
    {
        b = new FloatType[n];
        z = new FloatType[n];
    }

    //
    // initialize
    //
    for (ip = 0; ip<n; ip++)
    {
        for (iq = 0; iq<n; iq++)
        {
            eigenvectors[ip][iq] = 0.0;
        }
        eigenvectors[ip][ip] = 1.0;
    }
    for (ip = 0; ip<n; ip++)
    {
        b[ip] = a(ip, ip);
        eigenvalues[ip] = FloatType(a(ip, ip));
        z[ip] = 0.0;
    }

    // begin rotation sequence
    for (i = 0; i<VTK_MAX_ROTATIONS; i++)
    {
        sm = 0.0;
        for (ip = 0; ip<n - 1; ip++)
        {
            for (iq = ip + 1; iq<n; iq++)
            {
                sm += fabs(a(ip, iq));
            }
        }
        if (sm == 0.0)
        {
            break;
        }

        if (i < 3)                                // first 3 sweeps
        {
            tresh = (FloatType)0.2*sm / (n*n);
        }
        else
        {
            tresh = (FloatType)0.0;
        }

        for (ip = 0; ip<n - 1; ip++)
        {
            for (iq = ip + 1; iq<n; iq++)
            {
                g = FloatType(100.0*fabs(a(ip, iq)));

                // after 4 sweeps
                if (i > 3 && (fabs(eigenvalues[ip]) + g) == fabs(eigenvalues[ip])
                    && (fabs(eigenvalues[iq]) + g) == fabs(eigenvalues[iq]))
                {
                    a(ip, iq) = 0.0;
                }
                else if (fabs(a(ip, iq)) > tresh)
                {
                    h = eigenvalues[iq] - eigenvalues[ip];
                    if ((fabs(h) + g) == fabs(h))
                    {
                        t = (a(ip, iq)) / h;
                    }
                    else
                    {
                        theta = (FloatType)0.5*h / (a(ip, iq));
                        t = (FloatType)1.0 / (fabs(theta) + sqrt((FloatType)1.0 + theta*theta));
                        if (theta < 0.0)
                        {
                            t = -t;
                        }
                    }
                    c = (FloatType)1.0 / sqrt(1 + t*t);
                    s = t*c;
                    tau = s / ((FloatType)1.0 + c);
                    h = t*a(ip, iq);
                    z[ip] -= h;
                    z[iq] += h;
                    eigenvalues[ip] -= FloatType(h);
                    eigenvalues[iq] += FloatType(h);
                    a(ip, iq) = (FloatType)0.0;

                    // ip already shifted left by 1 unit
                    for (j = 0; j <= ip - 1; j++)
                    {
                        VTK_ROTATE(a, j, ip, j, iq);
                    }
                    // ip and iq already shifted left by 1 unit
                    for (j = ip + 1; j <= iq - 1; j++)
                    {
                        VTK_ROTATE(a, ip, j, j, iq);
                    }
                    // iq already shifted left by 1 unit
                    for (j = iq + 1; j<n; j++)
                    {
                        VTK_ROTATE(a, ip, j, iq, j);
                    }
                    for (j = 0; j<n; j++)
                    {
#pragma warning ( disable : 4244 )
                        VTK_ROTATE2(eigenvectors, j, ip, j, iq);
#pragma warning ( default : 4244 )
                    }
                }
            }
        }

        for (ip = 0; ip<n; ip++)
        {
            b[ip] += z[ip];
            eigenvalues[ip] = FloatType(b[ip]);
            z[ip] = 0.0;
        }
    }

    if (i >= VTK_MAX_ROTATIONS)
    {
        //return false;
    }

    // sort eigenfunctions; these changes do not affect accuracy
    for (j = 0; j<n - 1; j++)                  // boundary incorrect
    {
        k = j;
        tmp = eigenvalues[k];
        for (i = j + 1; i<n; i++)                // boundary incorrect, shifted already
        {
            if (eigenvalues[i] >= tmp)                   // why exchange if same?
            {
                k = i;
                tmp = eigenvalues[k];
            }
        }
        if (k != j)
        {
            eigenvalues[k] = eigenvalues[j];
            eigenvalues[j] = FloatType(tmp);
            for (i = 0; i<n; i++)
            {
                tmp = eigenvectors[i][j];
                eigenvectors[i][j] = eigenvectors[i][k];
                eigenvectors[i][k] = FloatType(tmp);
            }
        }
    }

    //
    // ensure eigenvector consistency (i.e., Jacobi can compute vectors that
    // are negatives of one another, e.g. (.707,.707,0) and (-.707,-.707,0)).
    // This can wreak havoc in hyperstreamlines/other downstream code, so we
    // select the most positive eigenvector.
    //
    int ceil_half_n = (n >> 1) + (n & 1);
    for (j = 0; j<n; j++)
    {
        for (numPos = 0, i = 0; i<n; i++)
        {
            if (eigenvectors[i][j] >= 0.0)
            {
                numPos++;
            }
        }
        //    if ( numPos < ceil(double(n)/double(2.0)) )
        if (numPos < ceil_half_n)
        {
            for (i = 0; i<n; i++)
            {
                eigenvectors[i][j] *= (FloatType)-1.0;
            }
        }
    }

    if (n > 4)
    {
        delete[] b;
        delete[] z;
    }
}
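The VTK_ROTATE and VTK_ROTATE2 macros are not part of this excerpt. In VTK-style Jacobi solvers they apply the classic plane rotation using the surrounding locals g, h, s, and tau; an equivalent definition might look like the sketch below (an assumption, since the actual macros live elsewhere in this codebase):

// Jacobi plane rotation on the entry pairs (i,j) and (k,l) of a matrix a(.,.).
#define VTK_ROTATE(a, i, j, k, l)        \
    g = a(i, j);                         \
    h = a(k, l);                         \
    a(i, j) = g - s * (h + g * tau);     \
    a(k, l) = h + s * (g - h * tau)

// Same rotation for the eigenvector array, which uses bracket indexing.
#define VTK_ROTATE2(a, i, j, k, l)       \
    g = a[i][j];                         \
    h = a[k][l];                         \
    a[i][j] = g - s * (h + g * tau);     \
    a[k][l] = h + s * (g - h * tau)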