bool ccRegistrationTools::ICP(	ccHObject* data,
								ccHObject* model,
								ccGLMatrix& transMat,
								double& finalScale,
								double& finalRMS,
								unsigned& finalPointCount,
								double minRMSDecrease,
								unsigned maxIterationCount,
								unsigned randomSamplingLimit,
								bool removeFarthestPoints,
								CCLib::ICPRegistrationTools::CONVERGENCE_TYPE method,
								bool adjustScale,
								double finalOverlapRatio/*=1.0*/,
								bool useDataSFAsWeights/*=false*/,
								bool useModelSFAsWeights/*=false*/,
								int filters/*=CCLib::ICPRegistrationTools::SKIP_NONE*/,
								int maxThreadCount/*=0*/,
								QWidget* parent/*=0*/)
{
	//progress bar
	ccProgressDialog pDlg(false, parent);

	Garbage<CCLib::GenericIndexedCloudPersist> cloudGarbage;

	//if the 'model' entity is a mesh, we need to sample points on it
	CCLib::GenericIndexedCloudPersist* modelCloud = nullptr;
	ccGenericMesh* modelMesh = nullptr;
	if (model->isKindOf(CC_TYPES::MESH))
	{
		modelMesh = ccHObjectCaster::ToGenericMesh(model);
		modelCloud = modelMesh->getAssociatedCloud();
	}
	else
	{
		modelCloud = ccHObjectCaster::ToGenericPointCloud(model);
	}

	//if the 'data' entity is a mesh, we need to sample points on it
	CCLib::GenericIndexedCloudPersist* dataCloud = nullptr;
	if (data->isKindOf(CC_TYPES::MESH))
	{
		dataCloud = CCLib::MeshSamplingTools::samplePointsOnMesh(ccHObjectCaster::ToGenericMesh(data), s_defaultSampledPointsOnDataMesh, &pDlg);
		if (!dataCloud)
		{
			ccLog::Error("[ICP] Failed to sample points on 'data' mesh!");
			return false;
		}
		cloudGarbage.add(dataCloud);
	}
	else
	{
		dataCloud = ccHObjectCaster::ToGenericPointCloud(data);
	}

	//we activate a temporary scalar field for registration distances computation
	CCLib::ScalarField* dataDisplayedSF = nullptr;
	int oldDataSfIdx = -1;
	int dataSfIdx = -1;

	//if the 'data' entity is a real ccPointCloud, we can even create a proper temporary SF for registration distances
	if (data->isA(CC_TYPES::POINT_CLOUD))
	{
		ccPointCloud* pc = static_cast<ccPointCloud*>(data);
		dataDisplayedSF = pc->getCurrentDisplayedScalarField();
		oldDataSfIdx = pc->getCurrentInScalarFieldIndex();
		dataSfIdx = pc->getScalarFieldIndexByName(REGISTRATION_DISTS_SF);
		if (dataSfIdx < 0)
			dataSfIdx = pc->addScalarField(REGISTRATION_DISTS_SF);
		if (dataSfIdx >= 0)
			pc->setCurrentScalarField(dataSfIdx);
		else
		{
			ccLog::Error("[ICP] Couldn't create temporary scalar field! Not enough memory?");
			return false;
		}
	}
	else
	{
		if (!dataCloud->enableScalarField())
		{
			ccLog::Error("[ICP] Couldn't create temporary scalar field! Not enough memory?");
			return false;
		}
	}

	//add a 'safety' margin to the input ratio
	static double s_overlapMarginRatio = 0.2;
	finalOverlapRatio = std::max(finalOverlapRatio, 0.01); //1% minimum

	//do we need to reduce the input point cloud (so as to be close
	//to the theoretical number of overlapping points - but not too
	//low so as we are not registered yet ;)
	if (finalOverlapRatio < 1.0 - s_overlapMarginRatio)
	{
		//DGM: we can now use 'approximate' distances as the SAITO algorithm is exact (but with a coarse resolution)
		//level = 7 if < 1,000,000
		//level = 8 if < 10,000,000
		//level = 9 if > 10,000,000
		int gridLevel = static_cast<int>(floor(log10(static_cast<double>(std::max(dataCloud->size(), modelCloud->size()))))) + 2;
		gridLevel = std::min(std::max(gridLevel, 7), 9);

		int result = -1;
		if (modelMesh)
		{
			CCLib::DistanceComputationTools::Cloud2MeshDistanceComputationParams c2mParams;
			c2mParams.octreeLevel = gridLevel;
			c2mParams.maxSearchDist = 0;
			c2mParams.useDistanceMap = true;
			c2mParams.signedDistances = false;
			c2mParams.flipNormals = false;
			c2mParams.multiThread = false;
			result = CCLib::DistanceComputationTools::computeCloud2MeshDistance(dataCloud, modelMesh, c2mParams, &pDlg);
		}
		else
		{
			result = CCLib::DistanceComputationTools::computeApproxCloud2CloudDistance(	dataCloud,
																						modelCloud,
																						gridLevel,
																						-1,
																						&pDlg);
		}

		if (result < 0)
		{
			ccLog::Error("Failed to determine the max (overlap) distance (not enough memory?)");
			return false;
		}

		//determine the max distance that (roughly) corresponds to the input overlap ratio
		ScalarType maxSearchDist = 0;
		{
			unsigned count = dataCloud->size();
			std::vector<ScalarType> distances;
			try
			{
				distances.resize(count);
			}
			catch (const std::bad_alloc&)
			{
				ccLog::Error("Not enough memory!");
				return false;
			}
			for (unsigned i = 0; i < count; ++i)
			{
				distances[i] = dataCloud->getPointScalarValue(i);
			}

			ParallelSort(distances.begin(), distances.end());

			//now look for the max value at 'finalOverlapRatio + margin' percent
			maxSearchDist = distances[static_cast<unsigned>(std::max(1.0, count * (finalOverlapRatio + s_overlapMarginRatio))) - 1];
		}

		//eventually select the points with distance below 'maxSearchDist'
		//(should roughly correspond to 'finalOverlapRatio + margin' percent)
		{
			CCLib::ReferenceCloud* refCloud = new CCLib::ReferenceCloud(dataCloud);
			cloudGarbage.add(refCloud);
			unsigned countBefore = dataCloud->size();
			unsigned baseIncrement = static_cast<unsigned>(std::max(100.0, countBefore * finalOverlapRatio * 0.05));
			for (unsigned i = 0; i < countBefore; ++i)
			{
				if (dataCloud->getPointScalarValue(i) <= maxSearchDist)
				{
					if (	refCloud->size() == refCloud->capacity()
						&&	!refCloud->reserve(refCloud->size() + baseIncrement))
					{
						ccLog::Error("Not enough memory!");
						return false;
					}
					refCloud->addPointIndex(i);
				}
			}
			refCloud->resize(refCloud->size());
			dataCloud = refCloud;

			unsigned countAfter = dataCloud->size();
			double keptRatio = static_cast<double>(countAfter) / countBefore;
			ccLog::Print(QString("[ICP][Partial overlap] Selecting %1 points out of %2 (%3%) for registration").arg(countAfter).arg(countBefore).arg(static_cast<int>(100 * keptRatio)));

			//update the relative 'final overlap' ratio
			finalOverlapRatio /= keptRatio;
		}
	}

	//weights
	CCLib::ScalarField* modelWeights = nullptr;
	CCLib::ScalarField* dataWeights = nullptr;
	{
		if (!modelMesh && useModelSFAsWeights)
		{
			if (modelCloud == dynamic_cast<CCLib::GenericIndexedCloudPersist*>(model) && model->isA(CC_TYPES::POINT_CLOUD))
			{
				ccPointCloud* pc = static_cast<ccPointCloud*>(model);
				modelWeights = pc->getCurrentDisplayedScalarField();
				if (!modelWeights)
					ccLog::Warning("[ICP] 'useModelSFAsWeights' is true but model has no displayed scalar field!");
			}
			else
			{
				ccLog::Warning("[ICP] 'useModelSFAsWeights' is true but only point cloud scalar fields can be used as weights!");
			}
		}

		if (useDataSFAsWeights)
		{
			if (!dataDisplayedSF)
			{
				if (dataCloud == ccHObjectCaster::ToPointCloud(data))
					ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but data has no displayed scalar field!");
				else
					ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but only point cloud scalar fields can be used as weights!");
			}
			else
			{
				dataWeights = dataDisplayedSF;
			}
		}
	}

	CCLib::ICPRegistrationTools::RESULT_TYPE result;
	CCLib::PointProjectionTools::Transformation transform;
	CCLib::ICPRegistrationTools::Parameters params;
	{
		params.convType = method;
		params.minRMSDecrease = minRMSDecrease;
		params.nbMaxIterations = maxIterationCount;
		params.adjustScale = adjustScale;
		params.filterOutFarthestPoints = removeFarthestPoints;
		params.samplingLimit = randomSamplingLimit;
		params.finalOverlapRatio = finalOverlapRatio;
		params.modelWeights = modelWeights;
		params.dataWeights = dataWeights;
		params.transformationFilters = filters;
		params.maxThreadCount = maxThreadCount;
	}

	result = CCLib::ICPRegistrationTools::Register(	modelCloud,
													modelMesh,
													dataCloud,
													params,
													transform,
													finalRMS,
													finalPointCount,
													static_cast<CCLib::GenericProgressCallback*>(&pDlg));

	if (result >= CCLib::ICPRegistrationTools::ICP_ERROR)
	{
		ccLog::Error("Registration failed: an error occurred (code %i)", result);
	}
	else if (result == CCLib::ICPRegistrationTools::ICP_APPLY_TRANSFO)
	{
		transMat = FromCCLibMatrix<PointCoordinateType, float>(transform.R, transform.T, transform.s);
		finalScale = transform.s;
	}

	//remove the temporary SF (if any)
	if (dataSfIdx >= 0)
	{
		assert(data->isA(CC_TYPES::POINT_CLOUD));
		ccPointCloud* pc = static_cast<ccPointCloud*>(data);
		pc->setCurrentScalarField(oldDataSfIdx);
		pc->deleteScalarField(dataSfIdx);
	}

	return (result < CCLib::ICPRegistrationTools::ICP_ERROR);
}
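//Illustrative sketch only (not part of the wrapper above): one possible way to invoke
//ccRegistrationTools::ICP from a tool or plugin. The entity pointers ('alignedEntity',
//'referenceEntity') and the parameter values (convergence threshold, iteration count,
//sampling limit, assumed overlap) are assumptions chosen for the example, not values
//taken from the code above.
static bool RunICPExample(ccHObject* alignedEntity, ccHObject* referenceEntity)
{
	ccGLMatrix transMat;
	double finalScale = 1.0;
	double finalRMS = 0.0;
	unsigned finalPointCount = 0;

	bool ok = ccRegistrationTools::ICP(	alignedEntity,		//'data' entity (the one that moves)
										referenceEntity,	//'model' entity (the reference, stays fixed)
										transMat,
										finalScale,
										finalRMS,
										finalPointCount,
										1.0e-5,				//minimum RMS decrease between two iterations
										20,					//maximum number of iterations
										50000,				//random sampling limit
										false,				//don't remove the farthest points
										CCLib::ICPRegistrationTools::MAX_ERROR_CONVERGENCE,
										false,				//don't adjust scale
										0.5);				//assumed ~50% final overlap

	if (ok)
	{
		//apply the resulting transformation to the 'data' entity
		alignedEntity->applyGLTransformation_recursive(&transMat);
		ccLog::Print(QString("[ICP example] final RMS = %1 (%2 points)").arg(finalRMS).arg(finalPointCount));
	}

	return ok;
}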
CCLib::ReferenceCloud* qHPR::removeHiddenPoints(CCLib::GenericIndexedCloudPersist* theCloud, const CCVector3& viewPoint, float fParam)
{
	assert(theCloud);

	unsigned nbPoints = theCloud->size();
	if (nbPoints == 0)
		return nullptr;

	//less than 4 points? no need for calculation, we return the whole cloud
	if (nbPoints < 4)
	{
		CCLib::ReferenceCloud* visiblePoints = new CCLib::ReferenceCloud(theCloud);
		if (!visiblePoints->addPointIndex(0, nbPoints)) //well, even for less than 4 points we never know ;)
		{
			//not enough memory!
			delete visiblePoints;
			visiblePoints = nullptr;
		}
		return visiblePoints;
	}

	PointCoordinateType maxRadius = 0;

	//convert the point cloud to an array of double triplets (for qHull)
	coordT* pt_array = new coordT[(nbPoints + 1) * 3];
	{
		coordT* _pt_array = pt_array;

		for (unsigned i = 0; i < nbPoints; ++i)
		{
			CCVector3 P = *theCloud->getPoint(i) - viewPoint;

			*_pt_array++ = (coordT)P.x;
			*_pt_array++ = (coordT)P.y;
			*_pt_array++ = (coordT)P.z;

			//we keep track of the highest 'radius'
			PointCoordinateType r2 = P.norm2();
			if (maxRadius < r2)
				maxRadius = r2;
		}

		//we add the view point (cf. HPR)
		*_pt_array++ = 0;
		*_pt_array++ = 0;
		*_pt_array++ = 0;

		maxRadius = sqrt(maxRadius);
	}

	//apply spherical flipping
	{
		maxRadius *= 2.0f * pow(10.0f, fParam);

		coordT* _pt_array = pt_array;
		for (unsigned i = 0; i < nbPoints; ++i)
		{
			CCVector3 P = *theCloud->getPoint(i) - viewPoint;

			double r = (double)(maxRadius / P.norm()) - 1.0;
			*_pt_array++ *= r;
			*_pt_array++ *= r;
			*_pt_array++ *= r;
		}
	}

	//array to flag the points lying on the convex hull
	std::vector<bool> pointBelongsToCvxHull;

	if (!qh_new_qhull(3, nbPoints + 1, pt_array, False, (char*)"qhull QJ Qci", 0, stderr))
	{
		try
		{
			pointBelongsToCvxHull.resize(nbPoints + 1, false);
		}
		catch (const std::bad_alloc&)
		{
			//not enough memory!
			delete[] pt_array;
			return nullptr;
		}

		vertexT* vertex = nullptr;
		vertexT** vertexp = nullptr;
		facetT* facet = nullptr;

		FORALLfacets
		{
			//if (!facet->simplicial)
			//	error("convhulln: non-simplicial facet"); //should never happen with QJ

			setT* vertices = qh_facet3vertex(facet);
			FOREACHvertex_(vertices)
			{
				pointBelongsToCvxHull[qh_pointid(vertex->point)] = true;
			}
			qh_settempfree(&vertices);
		}
	}

	delete[] pt_array;
	pt_array = nullptr;

	qh_freeqhull(!qh_ALL); //free long memory
	int curlong = 0;
	int totlong = 0;
	qh_memfreeshort(&curlong, &totlong); //free short memory and memory allocator

	if (!pointBelongsToCvxHull.empty())
	{
		//compute the number of points belonging to the convex hull
		unsigned cvxHullSize = 0;
		{
			for (unsigned i = 0; i < nbPoints; ++i)
				if (pointBelongsToCvxHull[i])
					++cvxHullSize;
		}

		CCLib::ReferenceCloud* visiblePoints = new CCLib::ReferenceCloud(theCloud);
		if (cvxHullSize != 0 && visiblePoints->reserve(cvxHullSize))
		{
			for (unsigned i = 0; i < nbPoints; ++i)
				if (pointBelongsToCvxHull[i])
					visiblePoints->addPointIndex(i); //can't fail, see above

			return visiblePoints;
		}
		else //not enough memory
		{
			delete visiblePoints;
			visiblePoints = nullptr;
		}
	}

	return nullptr;
}
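//Illustrative sketch only (not part of the plugin code above): a possible way to extract the
//points visible from a given camera position as a standalone cloud. 'cloud' and 'cameraPos'
//are hypothetical inputs, the fParam value (3.5) is an assumption, and the sketch assumes
//removeHiddenPoints is accessible from here (e.g. as a public static method of qHPR).
static ccPointCloud* ExtractVisiblePoints(ccPointCloud* cloud, const CCVector3& cameraPos)
{
	if (!cloud || cloud->size() == 0)
		return nullptr;

	//flag the points visible from 'cameraPos' (spherical flipping + convex hull, cf. HPR)
	CCLib::ReferenceCloud* visibleRef = qHPR::removeHiddenPoints(cloud, cameraPos, 3.5f);
	if (!visibleRef)
		return nullptr;

	//export the selected subset as a new point cloud
	ccPointCloud* visibleCloud = cloud->partialClone(visibleRef);

	delete visibleRef;
	visibleRef = nullptr;

	return visibleCloud;
}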
CCLib::ReferenceCloud* qHPR::removeHiddenPoints(CCLib::GenericIndexedCloudPersist* theCloud, float viewPoint[], float fParam)
{
	assert(theCloud);

	unsigned nbPoints = theCloud->size();
	if (nbPoints == 0)
		return 0;

	CCLib::ReferenceCloud* newCloud = new CCLib::ReferenceCloud(theCloud);

	//less than 4 points? no need for calculation, we return the whole cloud
	if (nbPoints < 4)
	{
		if (!newCloud->reserve(nbPoints)) //well, we never know ;)
		{
			//not enough memory!
			delete newCloud;
			return 0;
		}
		newCloud->addPointIndex(0, nbPoints);
		return newCloud;
	}

	//view point
	coordT Cx = viewPoint[0];
	coordT Cy = viewPoint[1];
	coordT Cz = viewPoint[2];

	float* radius = new float[nbPoints];
	if (!radius)
	{
		//not enough memory!
		delete newCloud;
		return 0;
	}
	float r, maxRadius = 0.0f;

	//table of points
	coordT* pt_array = new coordT[(nbPoints + 1) * 3];
	coordT* _pt_array = pt_array;
	theCloud->placeIteratorAtBegining();

//#define BACKUP_PROJECTED_CLOUDS
#ifdef BACKUP_PROJECTED_CLOUDS
	FILE* fp = fopen("output_centered.asc", "wt");
#endif

	double x, y, z;
	for (unsigned i = 0; i < nbPoints; ++i)
	{
		const CCVector3* P = theCloud->getNextPoint();
		*(_pt_array++) = x = coordT(P->x) - Cx;
		*(_pt_array++) = y = coordT(P->y) - Cy;
		*(_pt_array++) = z = coordT(P->z) - Cz;

		//we pre-compute the radius...
		r = (float)sqrt(x*x + y*y + z*z);
		//...in order to determine the max radius
		if (maxRadius < r)
			maxRadius = r;
		radius[i] = r;

#ifdef BACKUP_PROJECTED_CLOUDS
		fprintf(fp, "%f %f %f %f\n", x, y, z, r);
#endif
	}

	//we add the view point (cf. HPR)
	*(_pt_array++) = 0.0;
	*(_pt_array++) = 0.0;
	*(_pt_array++) = 0.0;

#ifdef BACKUP_PROJECTED_CLOUDS
	fprintf(fp, "%f %f %f %f\n", 0, 0, 0, 0);
	fclose(fp);
#endif

	maxRadius *= 2.0f * pow(10.0f, fParam);

	_pt_array = pt_array;
#ifdef BACKUP_PROJECTED_CLOUDS
	fp = fopen("output_transformed.asc", "wt");
#endif
	for (unsigned i = 0; i < nbPoints; ++i)
	{
		//spherical flipping
		r = maxRadius / radius[i] - 1.0f;
#ifndef BACKUP_PROJECTED_CLOUDS
		*(_pt_array++) *= double(r);
		*(_pt_array++) *= double(r);
		*(_pt_array++) *= double(r);
#else
		x = *_pt_array * double(r);
		*(_pt_array++) = x;
		y = *_pt_array * double(r);
		*(_pt_array++) = y;
		z = *_pt_array * double(r);
		*(_pt_array++) = z;
		fprintf(fp, "%f %f %f %f\n", x, y, z, r);
#endif
	}
#ifdef BACKUP_PROJECTED_CLOUDS
	fclose(fp);
#endif

	//we re-use the radius array to record whether each point belongs to the convex hull
	//delete[] radius;
	//uchar* pointBelongsToCvxHull = new uchar[nbPoints];
	uchar* pointBelongsToCvxHull = (uchar*)radius;
	memset(pointBelongsToCvxHull, 0, sizeof(uchar) * (nbPoints + 1));

	if (!qh_new_qhull(3, nbPoints + 1, pt_array, False, (char*)"qhull QJ s Qci Tcv", 0, stderr))
	{
		vertexT *vertex, **vertexp;
		facetT* facet;
		setT* vertices;
		int facetIndex = 0;

		FORALLfacets
		{
			/*if (!facet->simplicial)
				error("convhulln: non-simplicial facet"); //should never happen with QJ
			*/
			int vertexCount = 0;
			vertices = qh_facet3vertex(facet);
			FOREACHvertex_(vertices)
			{
				pointBelongsToCvxHull[qh_pointid(vertex->point)] = 1;
				++vertexCount;
			}
			qh_settempfree(&vertices);

			if (vertexCount < 3)
				printf("Warning, facet %d only has %d vertices\n", facetIndex, vertexCount); //likewise, but less fatal
			++facetIndex;
		}
	}