Example 1
ccPlane* ccGenericPointCloud::fitPlane(double* rms /*= 0*/)
{
	//number of points
	unsigned count = size();
	if (count<3)
		return 0;

	CCLib::Neighbourhood Yk(this);

	//we determine the plane normal from the eigenvector associated with the smallest eigenvalue of M = 1/n * S[(p-µ)*(p-µ)']
	CCLib::SquareMatrixd eig = Yk.computeCovarianceMatrix().computeJacobianEigenValuesAndVectors();

	//invalid matrix?
	if (!eig.isValid())
	{
		//ccConsole::Warning("[ccPointCloud::fitPlane] Failed to compute plane/normal for cloud '%s'",getName());
		return 0;
	}
	eig.sortEigenValuesAndVectors();

	//plane equation
	PointCoordinateType theLSQPlane[4];

	//the eigenvector associated with the smallest eigenvalue is the normal of the "least-squares best fitting plane"
	double vec[3];
	eig.getEigenValueAndVector(2,vec);
	//PointCoordinateType sign = (vec[2] < 0.0 ? -1.0 : 1.0);  //plane normal (always with a positive 'Z' by default)
	for (unsigned i=0;i<3;++i)
		theLSQPlane[i]=/*sign*/(PointCoordinateType)vec[i];
	CCVector3 N(theLSQPlane);

	//we also get centroid
	const CCVector3* G = Yk.getGravityCenter();
	assert(G);

	//finally, we just have to compute the 'constant' coefficient a3
	//we use the fact that the plane passes through the centroid --> GM.N = 0 (scalar product)
	//i.e. a0*G[0]+a1*G[1]+a2*G[2]=a3
	theLSQPlane[3] = G->dot(N);

	//least-square fitting RMS
	if (rms)
	{
		placeIteratorAtBegining();
		*rms = 0.0;
		for (unsigned k=0;k<count;++k)
		{
			double d = (double)CCLib::DistanceComputationTools::computePoint2PlaneDistance(getNextPoint(),theLSQPlane);
			*rms += d*d;
		}
		*rms = sqrt(*rms/(double)count);
	}

	//we now build the plane primitive's local frame
	eig.getEigenValueAndVector(0,vec); //main direction
	CCVector3 X(vec[0],vec[1],vec[2]); //plane local X axis
	//eig.getEigenValueAndVector(1,vec); //intermediate direction
	//CCVector3 Y(vec[0],vec[1],vec[2]);
	CCVector3 Y = N * X; //plane local Y axis (cross product)

	//finally, we compute the plane extents
	PointCoordinateType minX=0.0,maxX=0.0,minY=0.0,maxY=0.0;
	placeIteratorAtBegining();
	for (unsigned k=0;k<count;++k)
	{
		//projection into the local 2D plane frame
		CCVector3 P = *getNextPoint() - *G;
		PointCoordinateType x2D = P.dot(X);
		PointCoordinateType y2D = P.dot(Y);

		if (k!=0)
		{
			if (x2D<minX)
				minX=x2D;
			else if (x2D>maxX)
				maxX=x2D;
			if (y2D<minY)
				minY=y2D;
			else if (y2D>maxY)
				maxY=y2D;
		}
		else
		{
			minX=maxX=x2D;
			minY=maxY=y2D;
		}
	}

	//we recenter the plane (the centroid is not necessarily the center of its extents!)
	float dX = maxX-minX;
	float dY = maxY-minY;
	CCVector3 Gt = *G + X * (minX+dX*0.5);
	Gt += Y * (minY+dY*0.5);
	ccGLMatrix glMat(X,Y,N,Gt);

	return new ccPlane(dX,dY,&glMat);
}
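
For comparison, the same least-squares fit can be reproduced outside of CCLib. The sketch below is only an illustration (it assumes the Eigen library, which fitPlane itself does not use): centroid, covariance matrix, normal taken as the eigenvector of the smallest eigenvalue, constant term d = N.G, and the RMS of point-to-plane distances.

//minimal standalone sketch (assumes the Eigen library; not CCLib code)
#include <Eigen/Dense>
#include <cassert>
#include <cmath>
#include <vector>

struct PlaneFit
{
	Eigen::Vector3d N; //unit normal
	double d;          //plane equation: N.P = d
	double rms;        //RMS of point-to-plane distances
};

PlaneFit fitPlaneLSQ(const std::vector<Eigen::Vector3d>& pts)
{
	assert(pts.size() >= 3);

	//centroid
	Eigen::Vector3d G = Eigen::Vector3d::Zero();
	for (const auto& p : pts)
		G += p;
	G /= static_cast<double>(pts.size());

	//covariance matrix M = 1/n * S[(p-G)*(p-G)']
	Eigen::Matrix3d M = Eigen::Matrix3d::Zero();
	for (const auto& p : pts)
	{
		Eigen::Vector3d q = p - G;
		M += q * q.transpose();
	}
	M /= static_cast<double>(pts.size());

	//the eigenvector of the smallest eigenvalue is the plane normal
	//(Eigen sorts eigenvalues in ascending order, hence column 0)
	Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> solver(M);
	Eigen::Vector3d N = solver.eigenvectors().col(0);

	//the plane passes through the centroid
	double d = N.dot(G);

	//RMS of point-to-plane distances (N is unit length)
	double sum2 = 0.0;
	for (const auto& p : pts)
	{
		double dist = N.dot(p) - d;
		sum2 += dist * dist;
	}
	double rms = std::sqrt(sum2 / static_cast<double>(pts.size()));

	return PlaneFit{N, d, rms};
}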
Example 2
bool ccGenericMesh::laplacianSmooth(unsigned nbIteration, float factor, CCLib::GenericProgressCallback* progressCb/*=0*/)
{
    if (!m_associatedCloud)
        return false;

    //vertices
    unsigned vertCount = m_associatedCloud->size();
    //triangles
    unsigned faceCount = size();
    if (!vertCount || !faceCount)
        return false;

    GenericChunkedArray<3,PointCoordinateType>* verticesDisplacement = new GenericChunkedArray<3,PointCoordinateType>;
    if (!verticesDisplacement->resize(vertCount))
    {
        //not enough memory
        verticesDisplacement->release();
        return false;
    }

    //compute the number of edges to which each vertex belongs
    unsigned* edgesCount = new unsigned[vertCount];
    if (!edgesCount)
    {
        //not enough memory
        verticesDisplacement->release();
        return false;
    }
    memset(edgesCount, 0, sizeof(unsigned)*vertCount);
    placeIteratorAtBegining();
    for(unsigned j=0; j<faceCount; j++)
    {
        const CCLib::TriangleSummitsIndexes* tri = getNextTriangleIndexes();
        edgesCount[tri->i1]+=2;
        edgesCount[tri->i2]+=2;
        edgesCount[tri->i3]+=2;
    }

    //progress dialog
    CCLib::NormalizedProgress* nProgress = 0;
    if (progressCb)
    {
        unsigned totalSteps = nbIteration;
        nProgress = new CCLib::NormalizedProgress(progressCb,totalSteps);
        progressCb->setMethodTitle("Laplacian smooth");
        progressCb->setInfo(qPrintable(QString("Iterations: %1\nVertices: %2\nFaces: %3").arg(nbIteration).arg(vertCount).arg(faceCount)));
        progressCb->start();
    }

    //repeat Laplacian smoothing iterations
    for(unsigned iter = 0; iter < nbIteration; iter++)
    {
        verticesDisplacement->fill(0);

        //for each triangle
        placeIteratorAtBegining();
        for(unsigned j=0; j<faceCount; j++)
        {
            const CCLib::TriangleSummitsIndexes* tri = getNextTriangleIndexes();

            const CCVector3* A = m_associatedCloud->getPoint(tri->i1);
            const CCVector3* B = m_associatedCloud->getPoint(tri->i2);
            const CCVector3* C = m_associatedCloud->getPoint(tri->i3);

            CCVector3 dAB = (*B-*A);
            CCVector3 dAC = (*C-*A);
            CCVector3 dBC = (*C-*B);

            CCVector3* dA = (CCVector3*)verticesDisplacement->getValue(tri->i1);
            (*dA) += dAB+dAC;
            CCVector3* dB = (CCVector3*)verticesDisplacement->getValue(tri->i2);
            (*dB) += dBC-dAB;
            CCVector3* dC = (CCVector3*)verticesDisplacement->getValue(tri->i3);
            (*dC) -= dAC+dBC;
        }

        if (nProgress && !nProgress->oneStep())
        {
            //cancelled by user
            break;
        }

        //apply displacement
        verticesDisplacement->placeIteratorAtBegining();
        for (unsigned i=0; i<vertCount; i++)
        {
            //this is a "persistent" pointer and we know what type of cloud is behind ;)
            CCVector3* P = const_cast<CCVector3*>(m_associatedCloud->getPointPersistentPtr(i));
            const CCVector3* d = (const CCVector3*)verticesDisplacement->getValue(i);
            (*P) += (*d)*(factor/(PointCoordinateType)edgesCount[i]);
        }
    }

    m_associatedCloud->updateModificationTime();

    if (hasNormals())
        computeNormals();

    if (verticesDisplacement)
        verticesDisplacement->release();
    verticesDisplacement=0;

    if (edgesCount)
        delete[] edgesCount;
    edgesCount=0;

    if (nProgress)
        delete nProgress;
    nProgress=0;

    return true;
}
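
The per-triangle accumulation above is the classic "umbrella" Laplacian operator: each vertex is pulled toward its neighbours, and the accumulated displacement is scaled by factor divided by the number of incident edges. Here is a self-contained sketch of one such iteration on plain arrays (hypothetical Vec3/Tri types, not CloudCompare's API):

//minimal standalone sketch (hypothetical Vec3/Tri types, not CloudCompare's API)
#include <array>
#include <vector>

using Vec3 = std::array<float, 3>;
struct Tri { unsigned i1, i2, i3; };

void laplacianSmoothOnce(std::vector<Vec3>& verts, const std::vector<Tri>& tris, float factor)
{
    std::vector<Vec3> disp(verts.size(), Vec3{0.0f, 0.0f, 0.0f});
    std::vector<unsigned> edgesCount(verts.size(), 0);

    for (const Tri& t : tris)
    {
        const Vec3& A = verts[t.i1];
        const Vec3& B = verts[t.i2];
        const Vec3& C = verts[t.i3];

        for (int k = 0; k < 3; ++k)
        {
            float dAB = B[k] - A[k];
            float dAC = C[k] - A[k];
            float dBC = C[k] - B[k];
            disp[t.i1][k] += dAB + dAC; //pull A toward B and C
            disp[t.i2][k] += dBC - dAB; //pull B toward A and C
            disp[t.i3][k] -= dAC + dBC; //pull C toward A and B
        }

        //each triangle contributes 2 incident edges per vertex
        edgesCount[t.i1] += 2;
        edgesCount[t.i2] += 2;
        edgesCount[t.i3] += 2;
    }

    //apply the (scaled) displacements
    for (size_t i = 0; i < verts.size(); ++i)
        if (edgesCount[i] != 0)
            for (int k = 0; k < 3; ++k)
                verts[i][k] += disp[i][k] * (factor / static_cast<float>(edgesCount[i]));
}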
Example 3
bool ccGenericMesh::computeNormals()
{
    if (!m_associatedCloud || !m_associatedCloud->isA(CC_POINT_CLOUD)) //TODO
        return false;

    unsigned triCount = size();
    if (triCount==0)
    {
        ccLog::Error("[ccGenericMesh::computeNormals] Empty mesh!");
        return false;
    }
    unsigned vertCount=m_associatedCloud->size();
    if (vertCount<3)
    {
        ccLog::Error("[ccGenericMesh::computeNormals] Not enough vertices! (<3)");
        return false;
    }

    ccPointCloud* cloud = static_cast<ccPointCloud*>(m_associatedCloud);

    //we instantiate a temporary structure to store each vertex normal (uncompressed)
    NormsTableType* theNorms = new NormsTableType;
    if (!theNorms->reserve(vertCount))
    {
        theNorms->release();
        return false;
    }
    theNorms->fill(0);

    //allocate compressed normals array on vertices cloud
    bool normalsWereAllocated = cloud->hasNormals();
    if (!normalsWereAllocated && !cloud->resizeTheNormsTable())
    {
        theNorms->release();
        return false;
    }

    //for each triangle
    placeIteratorAtBegining();
    {
        for (unsigned i=0; i<triCount; ++i)
        {
            CCLib::TriangleSummitsIndexes* tsi = getNextTriangleIndexes();

            assert(tsi->i1<vertCount && tsi->i2<vertCount && tsi->i3<vertCount);
            const CCVector3 *A = cloud->getPoint(tsi->i1);
            const CCVector3 *B = cloud->getPoint(tsi->i2);
            const CCVector3 *C = cloud->getPoint(tsi->i3);

            //compute face normal (right hand rule)
            CCVector3 N = (*B-*A).cross(*C-*A);
            //N.normalize(); //DGM: no normalization = weighting by surface!

            //we add this normal to all triangle vertices
            PointCoordinateType* N1 = theNorms->getValue(tsi->i1);
            CCVector3::vadd(N1,N.u,N1);
            PointCoordinateType* N2 = theNorms->getValue(tsi->i2);
            CCVector3::vadd(N2,N.u,N2);
            PointCoordinateType* N3 = theNorms->getValue(tsi->i3);
            CCVector3::vadd(N3,N.u,N3);
        }
    }

    //for each vertex
    {
        for (unsigned i=0; i<vertCount; i++)
        {
            PointCoordinateType* N = theNorms->getValue(i);
            CCVector3::vnormalize(N);
            cloud->setPointNormal(i,N);
            theNorms->forwardIterator();
        }
    }

    showNormals(true);
    if (!normalsWereAllocated)
        cloud->showNormals(true);

    //theNorms->clear();
    theNorms->release();
    theNorms=0;

    return true;
}
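
What the loop above implements is the usual area-weighted vertex normal: each triangle's (non-normalized) face normal is added to its three vertices, and the per-vertex sum is normalized at the end. A standalone sketch of the same idea, without the compressed-normals machinery (hypothetical Vec3/Tri types):

//minimal standalone sketch (hypothetical Vec3/Tri types, no compressed normals)
#include <array>
#include <cmath>
#include <vector>

using Vec3 = std::array<float, 3>;
struct Tri { unsigned i1, i2, i3; };

std::vector<Vec3> computeVertexNormals(const std::vector<Vec3>& verts, const std::vector<Tri>& tris)
{
    std::vector<Vec3> normals(verts.size(), Vec3{0.0f, 0.0f, 0.0f});

    for (const Tri& t : tris)
    {
        const Vec3& A = verts[t.i1];
        const Vec3& B = verts[t.i2];
        const Vec3& C = verts[t.i3];

        //face normal = AB x AC (right hand rule); its length is twice the triangle
        //area, so skipping the normalization weights each face by its surface
        Vec3 AB = { B[0]-A[0], B[1]-A[1], B[2]-A[2] };
        Vec3 AC = { C[0]-A[0], C[1]-A[1], C[2]-A[2] };
        Vec3 N  = { AB[1]*AC[2] - AB[2]*AC[1],
                    AB[2]*AC[0] - AB[0]*AC[2],
                    AB[0]*AC[1] - AB[1]*AC[0] };

        const unsigned idx[3] = { t.i1, t.i2, t.i3 };
        for (int j = 0; j < 3; ++j)
            for (int k = 0; k < 3; ++k)
                normals[idx[j]][k] += N[k];
    }

    //normalize the per-vertex sums
    for (Vec3& n : normals)
    {
        float len = std::sqrt(n[0]*n[0] + n[1]*n[1] + n[2]*n[2]);
        if (len > 0)
            for (int k = 0; k < 3; ++k)
                n[k] /= len;
    }

    return normals;
}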
Example 4
bool ccGenericMesh::processScalarField(MESH_SCALAR_FIELD_PROCESS process)
{
    if (!m_associatedCloud || !m_associatedCloud->isScalarFieldEnabled())
        return false;

    unsigned nPts = m_associatedCloud->size();

    //allocate memory for the per-vertex mean SF values
    DistanceType* meanSF = new DistanceType[nPts];
    if (!meanSF)
    {
        //Not enough memory!
        return false;
    }

    //per-vertex counters
    unsigned *count = new unsigned[nPts];
    if (!count)
    {
        //Not enough memory!
        delete[] meanSF;
        return false;
    }

    //init arrays
    unsigned i;
    for (i=0; i<nPts; ++i)
    {
        meanSF[i] = m_associatedCloud->getPointScalarValue(i);
        count[i] = 1;
    }

    //for each triangle
    unsigned nTri = size();
    placeIteratorAtBegining();
    for (i=0; i<nTri; ++i)
    {
        const CCLib::TriangleSummitsIndexes* tsi = getNextTriangleIndexes(); //DGM: getNextTriangleIndexes is faster for mesh groups!

        //accumulate the SF values of connected vertices (one neighbour per triangle corner)
        meanSF[tsi->i1] += m_associatedCloud->getPointScalarValue(tsi->i2);
        meanSF[tsi->i2] += m_associatedCloud->getPointScalarValue(tsi->i3);
        meanSF[tsi->i3] += m_associatedCloud->getPointScalarValue(tsi->i1);

        //TODO DGM: we could weight this by the vertices distance?
        ++count[tsi->i1];
        ++count[tsi->i2];
        ++count[tsi->i3];
    }

    for (i=0; i<nPts; ++i)
        meanSF[i] /= (DistanceType)count[i];

    switch (process)
    {
    case SMOOTH_MESH_SF:
    {
        //Smooth = mean value
        for (i=0; i<nPts; ++i)
            m_associatedCloud->setPointScalarValue(i,meanSF[i]);
    }
    break;
    case ENHANCE_MESH_SF:
    {
        //Enhance = old value + (old value - mean value)
        for (i=0; i<nPts; ++i)
        {
            DistanceType v = 2.0f*m_associatedCloud->getPointScalarValue(i) - meanSF[i];
            m_associatedCloud->setPointScalarValue(i,v > 0.0f ? v : 0.0f);
        }
    }
    break;
    }

    delete[] meanSF;
    delete[] count;

    return true;
}
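
Both branches operate on the per-vertex mean computed over the triangle corners: SMOOTH replaces each value by that mean, while ENHANCE pushes the value away from the mean (clamped to zero). A compact sketch of the same logic on a plain scalar array (hypothetical types, outside the ccGenericMesh API):

//minimal standalone sketch (hypothetical Tri type, plain float scalar field)
#include <vector>

struct Tri { unsigned i1, i2, i3; };
enum class SFProcess { Smooth, Enhance };

void processScalarField(std::vector<float>& sf, const std::vector<Tri>& tris, SFProcess process)
{
    //start with each vertex's own value, counted once
    std::vector<float> mean(sf);
    std::vector<unsigned> count(sf.size(), 1);

    //accumulate one connected vertex value per triangle corner (same rotation as above)
    for (const Tri& t : tris)
    {
        mean[t.i1] += sf[t.i2]; ++count[t.i1];
        mean[t.i2] += sf[t.i3]; ++count[t.i2];
        mean[t.i3] += sf[t.i1]; ++count[t.i3];
    }

    for (size_t i = 0; i < sf.size(); ++i)
    {
        mean[i] /= static_cast<float>(count[i]);

        if (process == SFProcess::Smooth)
        {
            //smooth = local mean
            sf[i] = mean[i];
        }
        else
        {
            //enhance = old value + (old value - mean), clamped to 0
            float v = 2.0f * sf[i] - mean[i];
            sf[i] = (v > 0.0f ? v : 0.0f);
        }
    }
}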