bool ccKdTree::convertCellIndexToRandomColor()
{
	if (!m_associatedGenericCloud || !m_associatedGenericCloud->isA(CC_TYPES::POINT_CLOUD))
		return false;

	//get leaves
	std::vector<Leaf*> leaves;
	if (!getLeaves(leaves) || leaves.empty())
		return false;

	ccPointCloud* pc = static_cast<ccPointCloud*>(m_associatedGenericCloud);
	if (!pc->resizeTheRGBTable())
		return false;

	//for each cell
	for (size_t i=0; i<leaves.size(); ++i)
	{
		colorType col[3];
		ccColor::Generator::Random(col);
		CCLib::ReferenceCloud* subset = leaves[i]->points;
		if (subset)
		{
			for (unsigned j=0; j<subset->size(); ++j)
				pc->setPointColor(subset->getPointGlobalIndex(j),col);
		}
	}

	pc->showColors(true);
	return true;
}
void ccAlignDlg::dataSamplingRateChanged(double value)
{
	QString message("An error occurred");

	CC_SAMPLING_METHOD method = getSamplingMethod();
	float rate = (float)dataSamplingRate->value() / (float)dataSamplingRate->maximum();
	if (method == SPACE)
		rate = 1.0f - rate;

	dataSample->setSliderPosition((unsigned)((float)dataSample->maximum()*rate));

	switch (method)
	{
	case SPACE:
		{
			CCLib::ReferenceCloud* tmpCloud = getSampledData(); //DGM FIXME: wow! you generate a spatially sampled cloud just to display its size?!
			if (tmpCloud)
			{
				message = QString("distance units (%1 remaining points)").arg(tmpCloud->size());
				delete tmpCloud;
			}
		}
		break;
	case RANDOM:
		{
			message = QString("remaining points (%1%)").arg(rate*100.0f,0,'f',1);
		}
		break;
	case OCTREE:
		{
			CCLib::ReferenceCloud* tmpCloud = getSampledData(); //DGM FIXME: wow! you generate a spatially sampled cloud just to display its size?!
			if (tmpCloud)
			{
				message = QString("%1 remaining points").arg(tmpCloud->size());
				delete tmpCloud;
			}
		}
		break;
	default:
		{
			unsigned remaining = (unsigned)(rate * (float)dataObject->size());
			message = QString("%1 remaining points").arg(remaining);
		}
		break;
	}

	dataRemaining->setText(message);
}
void ccAlignDlg::estimateDelta()
{
	unsigned i, nb;
	float meanDensity, meanSqrDensity, dev, value;
	ccProgressDialog pDlg(false,this);

	CCLib::ReferenceCloud* sampledData = getSampledData();

	//we have to work on a copy of the cloud in order to prevent the algorithms from modifying the original cloud
	CCLib::ChunkedPointCloud* cloud = new CCLib::ChunkedPointCloud();
	cloud->reserve(sampledData->size());
	for (i=0; i<sampledData->size(); i++)
		cloud->addPoint(*sampledData->getPoint(i));
	cloud->enableScalarField();

	CCLib::GeometricalAnalysisTools::computeLocalDensity(cloud, &pDlg);

	nb = 0;
	meanDensity = 0.;
	meanSqrDensity = 0.;
	for (i=0; i<cloud->size(); i++)
	{
		value = cloud->getPointScalarValue(i);
		if (value > ZERO_TOLERANCE)
		{
			value = 1/value;
			meanDensity += value;
			meanSqrDensity += value*value;
			nb++;
		}
	}
	meanDensity /= (float)nb;
	meanSqrDensity /= (float)nb;
	dev = meanSqrDensity - (meanDensity*meanDensity);

	delta->setValue(meanDensity + dev);

	delete sampledData;
	delete cloud;
}
int ccFastMarchingForNormsDirection::updateResolvedTable(ccGenericPointCloud* theCloud, GenericChunkedArray<1,uchar>& resolved, NormsIndexesTableType* theNorms)
{
	if (!initialized)
		return -1;

	int count = 0;
	for (unsigned i=0; i<activeCells.size(); ++i)
	{
		DirectionCell* aCell = (DirectionCell*)theGrid[activeCells[i]];
		CCLib::ReferenceCloud* Yk = theOctree->getPointsInCell(aCell->cellCode,gridLevel,true);
		if (!Yk)
			continue;

		Yk->placeIteratorAtBegining();
		for (unsigned k=0; k<Yk->size(); ++k)
		{
			unsigned index = Yk->getCurrentPointGlobalIndex();
			resolved.setValue(index,1); //resolvedValue = 1

			const normsType& norm = theNorms->getValue(index);
			if (CCVector3::vdot(ccNormalVectors::GetNormal(norm),aCell->N) < 0.0)
			{
				PointCoordinateType newN[3];
				const PointCoordinateType* N = ccNormalVectors::GetNormal(norm);
				newN[0] = -N[0];
				newN[1] = -N[1];
				newN[2] = -N[2];
				theNorms->setValue(index,ccNormalVectors::GetNormIndex(newN));
			}

			//norm = NormalVectors::getNormIndex(aCell->N);
			//theNorms->setValue(index,&norm);

			theCloud->setPointScalarValue(index,aCell->T);
			//theCloud->setPointScalarValue(index,aCell->v);

			Yk->forwardIterator();
			++count;
		}
	}

	return count;
}
bool ccKdTree::convertCellIndexToSF()
{
	if (!m_associatedGenericCloud || !m_associatedGenericCloud->isA(CC_TYPES::POINT_CLOUD))
		return false;

	//get leaves
	std::vector<Leaf*> leaves;
	if (!getLeaves(leaves) || leaves.empty())
		return false;

	ccPointCloud* pc = static_cast<ccPointCloud*>(m_associatedGenericCloud);

	const char c_defaultSFName[] = "Kd-tree indexes";
	int sfIdx = pc->getScalarFieldIndexByName(c_defaultSFName);
	if (sfIdx < 0)
		sfIdx = pc->addScalarField(c_defaultSFName);
	if (sfIdx < 0)
	{
		ccLog::Error("Not enough memory!");
		return false;
	}
	pc->setCurrentScalarField(sfIdx);

	//for each cell
	for (size_t i=0; i<leaves.size(); ++i)
	{
		CCLib::ReferenceCloud* subset = leaves[i]->points;
		if (subset)
		{
			for (unsigned j=0; j<subset->size(); ++j)
				subset->setPointScalarValue(j,(ScalarType)i);
		}
	}

	pc->getScalarField(sfIdx)->computeMinAndMax();
	pc->setCurrentDisplayedScalarField(sfIdx);
	pc->showSF(true);

	return true;
}
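//Usage sketch (illustrative only, not part of the original sources): shows how the two
//conversion methods above can be used to visualize the kd-tree cells. It assumes 'tree'
//is a ccKdTree that has already been built over a ccPointCloud (the build step is omitted).
void showKdTreeCells(ccKdTree& tree)
{
	//write each leaf index into a dedicated scalar field ("Kd-tree indexes")...
	if (!tree.convertCellIndexToSF())
		ccLog::Warning("Couldn't create the kd-tree index scalar field (not enough memory?)");

	//...or give each leaf a random color instead
	if (!tree.convertCellIndexToRandomColor())
		ccLog::Warning("Couldn't colorize the kd-tree leaves (not enough memory?)");
}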
bool ccPointPairRegistrationDlg::convertToSphereCenter(CCVector3d& P, ccHObject* entity, PointCoordinateType& sphereRadius)
{
	sphereRadius = -PC_ONE;
	if (	!entity
		||	!useSphereToolButton->isChecked()
		||	!entity->isKindOf(CC_TYPES::POINT_CLOUD) ) //only works with clouds right now
	{
		//nothing to do
		return true;
	}

	//we'll now try to detect the sphere
	double searchRadius = radiusDoubleSpinBox->value();
	double maxRMSPercentage = maxRmsSpinBox->value() / 100.0;
	ccGenericPointCloud* cloud = static_cast<ccGenericPointCloud*>(entity);
	assert(cloud);

	//crop points inside a box centered on the current point
	ccBBox box;
	box.add(CCVector3::fromArray((P - CCVector3d(1,1,1)*searchRadius).u));
	box.add(CCVector3::fromArray((P + CCVector3d(1,1,1)*searchRadius).u));
	CCLib::ReferenceCloud* part = cloud->crop(box,true);

	bool success = false;
	if (part && part->size() > 16)
	{
		PointCoordinateType radius;
		CCVector3 C;
		double rms;
		ccProgressDialog pDlg(true, this);

		//first roughly search for the sphere
		if (CCLib::GeometricalAnalysisTools::detectSphereRobust(part,0.5,C,radius,rms,&pDlg,0.9))
		{
			if (radius / searchRadius < 0.5 || radius / searchRadius > 2.0)
			{
				ccLog::Warning(QString("[ccPointPairRegistrationDlg] Detected sphere radius (%1) is too far from search radius!").arg(radius));
			}
			else
			{
				//now look again (more precisely)
				{
					delete part;
					box.clear();
					box.add(C - CCVector3(1,1,1)*radius*static_cast<PointCoordinateType>(1.05)); //add 5%
					box.add(C + CCVector3(1,1,1)*radius*static_cast<PointCoordinateType>(1.05)); //add 5%
					part = cloud->crop(box,true);
					if (part && part->size() > 16)
						CCLib::GeometricalAnalysisTools::detectSphereRobust(part,0.5,C,radius,rms,&pDlg,0.99);
				}
				ccLog::Print(QString("[ccPointPairRegistrationDlg] Detected sphere radius = %1 (rms = %2)").arg(radius).arg(rms));

				if (radius / searchRadius < 0.5 || radius / searchRadius > 2.0)
				{
					ccLog::Warning("[ccPointPairRegistrationDlg] Sphere radius is too far from search radius!");
				}
				else if (rms / searchRadius >= maxRMSPercentage)
				{
					ccLog::Warning("[ccPointPairRegistrationDlg] RMS is too high!");
				}
				else
				{
					sphereRadius = radius;
					P = CCVector3d::fromArray(C.u);
					success = true;
				}
			}
		}
		else
		{
			ccLog::Warning("[ccPointPairRegistrationDlg] Failed to fit a sphere around the picked point!");
		}
	}
	else
	{
		//not enough memory? No points inside the box?
		ccLog::Warning("[ccPointPairRegistrationDlg] Failed to crop points around the picked point?!");
	}

	if (part)
		delete part;

	return success;
}
bool ccRegistrationTools::ICP(	ccHObject* data,
								ccHObject* model,
								ccGLMatrix& transMat,
								double& finalScale,
								double& finalRMS,
								unsigned& finalPointCount,
								double minRMSDecrease,
								unsigned maxIterationCount,
								unsigned randomSamplingLimit,
								bool removeFarthestPoints,
								CCLib::ICPRegistrationTools::CONVERGENCE_TYPE method,
								bool adjustScale,
								double finalOverlapRatio/*=1.0*/,
								bool useDataSFAsWeights/*=false*/,
								bool useModelSFAsWeights/*=false*/,
								int filters/*=CCLib::ICPRegistrationTools::SKIP_NONE*/,
								int maxThreadCount/*=0*/,
								QWidget* parent/*=0*/)
{
	//progress bar
	QScopedPointer<ccProgressDialog> progressDlg;
	if (parent)
	{
		progressDlg.reset(new ccProgressDialog(false, parent));
	}

	Garbage<CCLib::GenericIndexedCloudPersist> cloudGarbage;

	//if the 'model' entity is a mesh, we need to sample points on it
	CCLib::GenericIndexedCloudPersist* modelCloud = nullptr;
	ccGenericMesh* modelMesh = nullptr;
	if (model->isKindOf(CC_TYPES::MESH))
	{
		modelMesh = ccHObjectCaster::ToGenericMesh(model);
		modelCloud = modelMesh->getAssociatedCloud();
	}
	else
	{
		modelCloud = ccHObjectCaster::ToGenericPointCloud(model);
	}

	//if the 'data' entity is a mesh, we need to sample points on it
	CCLib::GenericIndexedCloudPersist* dataCloud = nullptr;
	if (data->isKindOf(CC_TYPES::MESH))
	{
		dataCloud = CCLib::MeshSamplingTools::samplePointsOnMesh(ccHObjectCaster::ToGenericMesh(data), s_defaultSampledPointsOnDataMesh, progressDlg.data());
		if (!dataCloud)
		{
			ccLog::Error("[ICP] Failed to sample points on 'data' mesh!");
			return false;
		}
		cloudGarbage.add(dataCloud);
	}
	else
	{
		dataCloud = ccHObjectCaster::ToGenericPointCloud(data);
	}

	//we activate a temporary scalar field for registration distances computation
	CCLib::ScalarField* dataDisplayedSF = nullptr;
	int oldDataSfIdx = -1, dataSfIdx = -1;

	//if the 'data' entity is a real ccPointCloud, we can even create a proper temporary SF for registration distances
	if (data->isA(CC_TYPES::POINT_CLOUD))
	{
		ccPointCloud* pc = static_cast<ccPointCloud*>(data);
		dataDisplayedSF = pc->getCurrentDisplayedScalarField();
		oldDataSfIdx = pc->getCurrentInScalarFieldIndex();
		dataSfIdx = pc->getScalarFieldIndexByName(REGISTRATION_DISTS_SF);
		if (dataSfIdx < 0)
			dataSfIdx = pc->addScalarField(REGISTRATION_DISTS_SF);
		if (dataSfIdx >= 0)
		{
			pc->setCurrentScalarField(dataSfIdx);
		}
		else
		{
			ccLog::Error("[ICP] Couldn't create temporary scalar field! Not enough memory?");
			return false;
		}
	}
	else
	{
		if (!dataCloud->enableScalarField())
		{
			ccLog::Error("[ICP] Couldn't create temporary scalar field! Not enough memory?");
			return false;
		}
	}

	//add a 'safety' margin to input ratio
	static double s_overlapMarginRatio = 0.2;
	finalOverlapRatio = std::max(finalOverlapRatio, 0.01); //1% minimum

	//do we need to reduce the input point cloud (so as to be close
	//to the theoretical number of overlapping points - but not too
	//low so as we are not registered yet ;)
	if (finalOverlapRatio < 1.0 - s_overlapMarginRatio)
	{
		//DGM we can now use 'approximate' distances as SAITO algorithm is exact (but with a coarse resolution)
		//level = 7 if < 1.000.000
		//level = 8 if < 10.000.000
		//level = 9 if > 10.000.000
		int gridLevel = static_cast<int>(floor(log10(static_cast<double>(std::max(dataCloud->size(), modelCloud->size()))))) + 2;
		gridLevel = std::min(std::max(gridLevel, 7), 9);

		int result = -1;
		if (modelMesh)
		{
			CCLib::DistanceComputationTools::Cloud2MeshDistanceComputationParams c2mParams;
			c2mParams.octreeLevel = gridLevel;
			c2mParams.maxSearchDist = 0;
			c2mParams.useDistanceMap = true;
			c2mParams.signedDistances = false;
			c2mParams.flipNormals = false;
			c2mParams.multiThread = false;
			result = CCLib::DistanceComputationTools::computeCloud2MeshDistance(dataCloud, modelMesh, c2mParams, progressDlg.data());
		}
		else
		{
			result = CCLib::DistanceComputationTools::computeApproxCloud2CloudDistance(	dataCloud,
																						modelCloud,
																						gridLevel,
																						-1,
																						progressDlg.data());
		}

		if (result < 0)
		{
			ccLog::Error("Failed to determine the max (overlap) distance (not enough memory?)");
			return false;
		}

		//determine the max distance that (roughly) corresponds to the input overlap ratio
		ScalarType maxSearchDist = 0;
		{
			unsigned count = dataCloud->size();
			std::vector<ScalarType> distances;
			try
			{
				distances.resize(count);
			}
			catch (const std::bad_alloc&)
			{
				ccLog::Error("Not enough memory!");
				return false;
			}
			for (unsigned i=0; i<count; ++i)
			{
				distances[i] = dataCloud->getPointScalarValue(i);
			}
			ParallelSort(distances.begin(), distances.end());

			//now look for the max value at 'finalOverlapRatio + margin' percent
			maxSearchDist = distances[static_cast<unsigned>(std::max(1.0,count*(finalOverlapRatio+s_overlapMarginRatio)))-1];
		}

		//eventually select the points with distance below 'maxSearchDist'
		//(should roughly correspond to 'finalOverlapRatio + margin' percent)
		{
			CCLib::ReferenceCloud* refCloud = new CCLib::ReferenceCloud(dataCloud);
			cloudGarbage.add(refCloud);
			unsigned countBefore = dataCloud->size();
			unsigned baseIncrement = static_cast<unsigned>(std::max(100.0,countBefore*finalOverlapRatio*0.05));
			for (unsigned i=0; i<countBefore; ++i)
			{
				if (dataCloud->getPointScalarValue(i) <= maxSearchDist)
				{
					if (	refCloud->size() == refCloud->capacity()
						&&	!refCloud->reserve(refCloud->size() + baseIncrement) )
					{
						ccLog::Error("Not enough memory!");
						return false;
					}
					refCloud->addPointIndex(i);
				}
			}
			refCloud->resize(refCloud->size());
			dataCloud = refCloud;

			unsigned countAfter = dataCloud->size();
			double keptRatio = static_cast<double>(countAfter)/countBefore;
			ccLog::Print(QString("[ICP][Partial overlap] Selecting %1 points out of %2 (%3%) for registration").arg(countAfter).arg(countBefore).arg(static_cast<int>(100*keptRatio)));

			//update the relative 'final overlap' ratio
			finalOverlapRatio /= keptRatio;
		}
	}

	//weights
	CCLib::ScalarField* modelWeights = nullptr;
	CCLib::ScalarField* dataWeights = nullptr;
	{
		if (!modelMesh && useModelSFAsWeights)
		{
			if (modelCloud == dynamic_cast<CCLib::GenericIndexedCloudPersist*>(model) && model->isA(CC_TYPES::POINT_CLOUD))
			{
				ccPointCloud* pc = static_cast<ccPointCloud*>(model);
				modelWeights = pc->getCurrentDisplayedScalarField();
				if (!modelWeights)
					ccLog::Warning("[ICP] 'useModelSFAsWeights' is true but model has no displayed scalar field!");
			}
			else
			{
				ccLog::Warning("[ICP] 'useModelSFAsWeights' is true but only point cloud scalar fields can be used as weights!");
			}
		}

		if (useDataSFAsWeights)
		{
			if (!dataDisplayedSF)
			{
				if (dataCloud == ccHObjectCaster::ToPointCloud(data))
					ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but data has no displayed scalar field!");
				else
					ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but only point cloud scalar fields can be used as weights!");
			}
			else
			{
				dataWeights = dataDisplayedSF;
			}
		}
	}

	CCLib::ICPRegistrationTools::RESULT_TYPE result;
	CCLib::PointProjectionTools::Transformation transform;
	CCLib::ICPRegistrationTools::Parameters params;
	{
		params.convType = method;
		params.minRMSDecrease = minRMSDecrease;
		params.nbMaxIterations = maxIterationCount;
		params.adjustScale = adjustScale;
		params.filterOutFarthestPoints = removeFarthestPoints;
		params.samplingLimit = randomSamplingLimit;
		params.finalOverlapRatio = finalOverlapRatio;
		params.modelWeights = modelWeights;
		params.dataWeights = dataWeights;
		params.transformationFilters = filters;
		params.maxThreadCount = maxThreadCount;
	}

	result = CCLib::ICPRegistrationTools::Register(	modelCloud,
													modelMesh,
													dataCloud,
													params,
													transform,
													finalRMS,
													finalPointCount,
													static_cast<CCLib::GenericProgressCallback*>(progressDlg.data()));

	if (result >= CCLib::ICPRegistrationTools::ICP_ERROR)
	{
		ccLog::Error("Registration failed: an error occurred (code %i)",result);
	}
	else if (result == CCLib::ICPRegistrationTools::ICP_APPLY_TRANSFO)
	{
		transMat = FromCCLibMatrix<PointCoordinateType, float>(transform.R, transform.T, transform.s);
		finalScale = transform.s;
	}

	//remove temporary SF (if any)
	if (dataSfIdx >= 0)
	{
		assert(data->isA(CC_TYPES::POINT_CLOUD));
		ccPointCloud* pc = static_cast<ccPointCloud*>(data);
		pc->setCurrentScalarField(oldDataSfIdx);
		pc->deleteScalarField(dataSfIdx);
	}

	return (result < CCLib::ICPRegistrationTools::ICP_ERROR);
}
void qHPR::doAction()
{
	assert(m_app);
	if (!m_app)
		return;

	const ccHObject::Container& selectedEntities = m_app->getSelectedEntities();
	size_t selNum = selectedEntities.size();
	if (selNum != 1 || !selectedEntities.front()->isA(CC_TYPES::POINT_CLOUD))
	{
		m_app->dispToConsole("Select only one cloud!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	ccPointCloud* cloud = static_cast<ccPointCloud*>(selectedEntities[0]);

	ccGLWindow* win = m_app->getActiveGLWindow();
	if (!win)
	{
		m_app->dispToConsole("No active window!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	//display parameters
	const ccViewportParameters& params = win->getViewportParameters();
	if (!params.perspectiveView)
	{
		m_app->dispToConsole("Perspective mode only!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	ccHprDlg dlg(m_app->getMainWindow());
	if (!dlg.exec())
		return;

	//progress dialog
	ccProgressDialog progressCb(false,m_app->getMainWindow());

	//unique parameter: the octree subdivision level
	int octreeLevel = dlg.octreeLevelSpinBox->value();
	assert(octreeLevel >= 0 && octreeLevel <= CCLib::DgmOctree::MAX_OCTREE_LEVEL);

	//compute the octree if the cloud doesn't have one already
	ccOctree::Shared theOctree = cloud->getOctree();
	if (!theOctree)
	{
		theOctree = cloud->computeOctree(&progressCb);
		if (theOctree && cloud->getParent())
		{
			m_app->addToDB(cloud->getOctreeProxy());
		}
	}

	if (!theOctree)
	{
		m_app->dispToConsole("Couldn't compute octree!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	CCVector3d viewPoint = params.cameraCenter;
	if (params.objectCenteredView)
	{
		CCVector3d PC = params.cameraCenter - params.pivotPoint;
		params.viewMat.inverse().apply(PC);
		viewPoint = params.pivotPoint + PC;
	}

	//HPR
	CCLib::ReferenceCloud* visibleCells = 0;
	{
		QElapsedTimer eTimer;
		eTimer.start();

		CCLib::ReferenceCloud* theCellCenters = CCLib::CloudSamplingTools::subsampleCloudWithOctreeAtLevel(	cloud,
																											static_cast<unsigned char>(octreeLevel),
																											CCLib::CloudSamplingTools::NEAREST_POINT_TO_CELL_CENTER,
																											&progressCb,
																											theOctree.data());
		if (!theCellCenters)
		{
			m_app->dispToConsole("Error while simplifying point cloud with octree!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			return;
		}

		visibleCells = removeHiddenPoints(theCellCenters,viewPoint,3.5);

		m_app->dispToConsole(QString("[HPR] Cells: %1 - Time: %2 s").arg(theCellCenters->size()).arg(eTimer.elapsed()/1.0e3));

		//warning: after this, visibleCells can't be used anymore as a
		//normal cloud (as its 'associated cloud' has been deleted).
		//Only its indexes are valid! (they correspond to octree cells)
		delete theCellCenters;
		theCellCenters = 0;
	}

	if (visibleCells)
	{
		//DGM: we generate a new cloud now, instead of playing with the points visibility! (too confusing for the user)
		/*if (!cloud->isVisibilityTableInstantiated() && !cloud->resetVisibilityArray())
		{
			m_app->dispToConsole("Visibility array allocation failed! (Not enough memory?)",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			return;
		}

		ccPointCloud::VisibilityTableType* pointsVisibility = cloud->getTheVisibilityArray();
		assert(pointsVisibility);
		pointsVisibility->fill(POINT_HIDDEN);
		//*/

		CCLib::ReferenceCloud visiblePoints(theOctree->associatedCloud());

		unsigned visiblePointCount = 0;
		unsigned visibleCellsCount = visibleCells->size();

		CCLib::DgmOctree::cellIndexesContainer cellIndexes;
		if (!theOctree->getCellIndexes(static_cast<unsigned char>(octreeLevel), cellIndexes))
		{
			m_app->dispToConsole("Couldn't fetch the list of octree cell indexes! (Not enough memory?)",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			delete visibleCells;
			return;
		}

		for (unsigned i=0; i<visibleCellsCount; ++i)
		{
			//cell index
			unsigned index = visibleCells->getPointGlobalIndex(i);

			//points in this cell...
			CCLib::ReferenceCloud Yk(theOctree->associatedCloud());
			theOctree->getPointsInCellByCellIndex(&Yk,cellIndexes[index],static_cast<unsigned char>(octreeLevel));

			//...are all visible
			/*unsigned count = Yk.size();
			for (unsigned j=0; j<count; ++j)
				pointsVisibility->setValue(Yk.getPointGlobalIndex(j),POINT_VISIBLE);
			visiblePointCount += count;
			//*/
			if (!visiblePoints.add(Yk))
			{
				m_app->dispToConsole("Not enough memory!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
				delete visibleCells;
				return;
			}
			visiblePointCount += Yk.size();
		}
		delete visibleCells;
		visibleCells = 0;

		m_app->dispToConsole(QString("[HPR] Visible points: %1").arg(visiblePointCount));

		if (visiblePoints.size() == cloud->size())
		{
			m_app->dispToConsole("No points were removed!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		}
		else
		{
			//create a new cloud from the visibility selection
			ccPointCloud* newCloud = cloud->partialClone(&visiblePoints);
			if (newCloud)
			{
				newCloud->setDisplay(cloud->getDisplay());
				newCloud->setVisible(true);
				newCloud->setName(cloud->getName()+QString(".visible_points"));
				cloud->setEnabled(false);

				//add associated viewport object
				cc2DViewportObject* viewportObject = new cc2DViewportObject(QString("Viewport"));
				viewportObject->setParameters(params);
				newCloud->addChild(viewportObject);

				m_app->addToDB(newCloud);
				newCloud->redrawDisplay();
			}
			else
			{
				m_app->dispToConsole("Not enough memory!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			}
		}
	}

	//currently selected entities appearance may have changed!
	m_app->refreshAll();
}
bool ccKdTreeForFacetExtraction::FuseCells(	ccKdTree* kdTree,
											double maxError,
											CCLib::DistanceComputationTools::ERROR_MEASURES errorMeasure,
											double maxAngle_deg,
											PointCoordinateType overlapCoef/*=1*/,
											bool closestFirst/*=true*/,
											CCLib::GenericProgressCallback* progressCb/*=0*/)
{
	if (!kdTree)
		return false;

	ccGenericPointCloud* associatedGenericCloud = kdTree->associatedGenericCloud();
	if (!associatedGenericCloud || !associatedGenericCloud->isA(CC_TYPES::POINT_CLOUD) || maxError < 0.0)
		return false;

	//get leaves
	std::vector<ccKdTree::Leaf*> leaves;
	if (!kdTree->getLeaves(leaves) || leaves.empty())
		return false;

	//progress notification
	CCLib::NormalizedProgress nProgress(progressCb, static_cast<unsigned>(leaves.size()));
	if (progressCb)
	{
		progressCb->update(0);
		if (progressCb->textCanBeEdited())
		{
			progressCb->setMethodTitle("Fuse Kd-tree cells");
			progressCb->setInfo(qPrintable(QString("Cells: %1\nMax error: %2").arg(leaves.size()).arg(maxError)));
		}
		progressCb->start();
	}

	ccPointCloud* pc = static_cast<ccPointCloud*>(associatedGenericCloud);

	//sort cells based on their population size (we start with the biggest ones)
	SortAlgo(leaves.begin(), leaves.end(), DescendingLeafSizeComparison);

	//set all 'userData' to -1 (i.e. unfused cells)
	{
		for (size_t i=0; i<leaves.size(); ++i)
		{
			leaves[i]->userData = -1;
			//check by the way that the plane normal is unit!
			assert(static_cast<double>(fabs(CCVector3(leaves[i]->planeEq).norm2()) - 1.0) < 1.0e-6);
		}
	}

	//cosine of the max angle between fused 'planes'
	const double c_minCosNormAngle = cos(maxAngle_deg * CC_DEG_TO_RAD);

	//fuse all cells, starting from the ones with the best error
	const int unvisitedNeighborValue = -1;
	bool cancelled = false;
	int macroIndex = 1; //starts at 1 (0 is reserved for cells already above the max error)
	{
		for (size_t i=0; i<leaves.size(); ++i)
		{
			ccKdTree::Leaf* currentCell = leaves[i];
			if (currentCell->error >= maxError)
				currentCell->userData = 0; //0 = special group for cells already above the user defined threshold!

			//already fused?
			if (currentCell->userData != -1)
			{
				if (progressCb && !nProgress.oneStep()) //process canceled by user
				{
					cancelled = true;
					break;
				}
				continue;
			}

			//we create a new "macro cell" index
			currentCell->userData = macroIndex++;

			//we init the current set of 'fused' points with the cell's points
			CCLib::ReferenceCloud* currentPointSet = currentCell->points;

			//get current fused set centroid and normal
			CCVector3 currentCentroid = *CCLib::Neighbourhood(currentPointSet).getGravityCenter();
			CCVector3 currentNormal(currentCell->planeEq);

			//visited neighbors
			ccKdTree::LeafSet visitedNeighbors;
			//set of candidates
			std::list<Candidate> candidates;

			//we are going to iteratively look for neighbor cells that could be fused to this one
			ccKdTree::LeafVector cellsToTest;
			cellsToTest.push_back(currentCell);

			if (progressCb && !nProgress.oneStep()) //process canceled by user
			{
				cancelled = true;
				break;
			}

			while (!cellsToTest.empty() || !candidates.empty())
			{
				//get all neighbors around the 'waiting' cell(s)
				if (!cellsToTest.empty())
				{
					ccKdTree::LeafSet neighbors;
					while (!cellsToTest.empty())
					{
						if (!kdTree->getNeighborLeaves(cellsToTest.back(), neighbors, &unvisitedNeighborValue)) //we only consider unvisited cells!
						{
							//an error occurred
							return false;
						}
						cellsToTest.pop_back();
					}

					//add those (new) neighbors to the 'visitedNeighbors' set
					//and to the set of candidates (if they are not already there)
					for (ccKdTree::LeafSet::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
					{
						ccKdTree::Leaf* neighbor = *it;
						std::pair<ccKdTree::LeafSet::iterator,bool> ret = visitedNeighbors.insert(neighbor);
						//neighbor not already in the set?
						if (ret.second)
						{
							//we create the corresponding candidate
							try
							{
								candidates.push_back(Candidate(neighbor));
							}
							catch (const std::bad_alloc&)
							{
								//not enough memory!
								ccLog::Warning("[ccKdTreeForFacetExtraction] Not enough memory!");
								return false;
							}
						}
					}
				}

				//are there any candidates left?
				if (!candidates.empty())
				{
					//update the set of candidates
					if (closestFirst && candidates.size() > 1)
					{
						for (std::list<Candidate>::iterator it = candidates.begin(); it != candidates.end(); ++it)
							it->dist = (it->centroid - currentCentroid).norm2();

						//sort the candidates by their distance
						candidates.sort(CandidateDistAscendingComparison);
					}

					//we will keep track of the best fused 'couple' at each pass
					std::list<Candidate>::iterator bestIt = candidates.end();
					CCLib::ReferenceCloud* bestFused = 0;
					CCVector3 bestNormal(0,0,0);
					double bestError = -1.0;

					unsigned skipCount = 0;
					for (std::list<Candidate>::iterator it = candidates.begin(); it != candidates.end(); /*++it*/)
					{
						assert(it->leaf && it->leaf->points);
						assert(currentPointSet->getAssociatedCloud() == it->leaf->points->getAssociatedCloud());

						//if the leaf orientation is too different
						if (fabs(CCVector3(it->leaf->planeEq).dot(currentNormal)) < c_minCosNormAngle)
						{
							it = candidates.erase(it);
							//++it;
							//++skipCount;
							continue;
						}

						//compute the minimum distance between the candidate centroid and the 'currentPointSet'
						PointCoordinateType minDistToMainSet = 0.0;
						{
							for (unsigned j=0; j<currentPointSet->size(); ++j)
							{
								const CCVector3* P = currentPointSet->getPoint(j);
								PointCoordinateType d2 = (*P - it->centroid).norm2();
								if (d2 < minDistToMainSet || j == 0)
									minDistToMainSet = d2;
							}
							minDistToMainSet = sqrt(minDistToMainSet);
						}

						//if the leaf is too far
						if (it->radius < minDistToMainSet / overlapCoef)
						{
							++it;
							++skipCount;
							continue;
						}

						//fuse the main set with the current candidate
						CCLib::ReferenceCloud* fused = new CCLib::ReferenceCloud(*currentPointSet);
						if (!fused->add(*(it->leaf->points)))
						{
							//not enough memory!
							ccLog::Warning("[ccKdTreeForFacetExtraction] Not enough memory!");
							delete fused;
							if (currentPointSet != currentCell->points)
								delete currentPointSet;
							return false;
						}

						//fit a plane and estimate the resulting error
						double error = -1.0;
						const PointCoordinateType* planeEquation = CCLib::Neighbourhood(fused).getLSPlane();
						if (planeEquation)
							error = CCLib::DistanceComputationTools::ComputeCloud2PlaneDistance(fused, planeEquation, errorMeasure);

						if (error < 0.0 || error > maxError)
						{
							//candidate is rejected
							it = candidates.erase(it);
						}
						else
						{
							//otherwise we keep track of the best one!
							if (bestError < 0.0 || error < bestError)
							{
								bestIt = it;
								bestError = error;
								if (bestFused)
									delete bestFused;
								bestFused = fused;
								bestNormal = CCVector3(planeEquation);
								fused = 0;

								if (closestFirst)
									break; //if we have found a good candidate, we stop here (closest first ;)
							}
							++it;
						}

						if (fused)
						{
							delete fused;
							fused = 0;
						}
					}

					//do we have a (best) candidate for this pass?
					if (bestIt != candidates.end())
					{
						assert(bestFused && bestError >= 0.0);
						if (currentPointSet != currentCell->points)
							delete currentPointSet;
						currentPointSet = bestFused;
						{
							//update infos
							CCLib::Neighbourhood N(currentPointSet);
							//currentCentroid = *N.getGravityCenter(); //if we update it, the search will naturally shift along one dimension!
							//currentNormal = bestNormal; //same thing here for normals
						}

						bestIt->leaf->userData = currentCell->userData;
						//bestIt->leaf->userData = macroIndex++; //FIXME TEST

						//we will test this cell's neighbors as well
						cellsToTest.push_back(bestIt->leaf);

						if (progressCb && !nProgress.oneStep()) //process canceled by user
						{
							//premature end!
							candidates.clear();
							cellsToTest.clear();
							cancelled = true;
							break;
						}
						QApplication::processEvents();

						//we also remove it from the candidates list
						candidates.erase(bestIt);
					}

					if (skipCount == candidates.size() && cellsToTest.empty())
					{
						//only far leaves remain...
						candidates.clear();
					}
				}
			}
			//no more candidates or cells to test

			//end of the fusion process for the current leaf
			if (currentPointSet != currentCell->points)
				delete currentPointSet;
			currentPointSet = 0;

			if (cancelled)
				break;
		}
	}

	//convert fused indexes to SF
	if (!cancelled)
	{
		pc->enableScalarField();

		for (size_t i=0; i<leaves.size(); ++i)
		{
			CCLib::ReferenceCloud* subset = leaves[i]->points;
			if (subset)
			{
				ScalarType scalar = (ScalarType)leaves[i]->userData;
				if (leaves[i]->userData <= 0) //for unfused cells, we create new individual groups
					scalar = static_cast<ScalarType>(macroIndex++);
					//scalar = NAN_VALUE; //FIXME TEST

				for (unsigned j=0; j<subset->size(); ++j)
					subset->setPointScalarValue(j,scalar);
			}
		}

		//pc->setCurrentDisplayedScalarField(sfIdx);
	}

	return !cancelled;
}
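//Usage sketch (illustrative only, not part of the original sources): a minimal call to the
//FuseCells() helper above, assuming it is exposed as a static method. 'tree' is assumed to be
//a ccKdTree already built over a ccPointCloud, the numeric values are illustrative, and
//CCLib::DistanceComputationTools::RMS is assumed to be one of the available ERROR_MEASURES.
bool fusePlanarCells(ccKdTree* tree)
{
	return ccKdTreeForFacetExtraction::FuseCells(	tree,
													0.05,	//max error (same units as the cloud coordinates)
													CCLib::DistanceComputationTools::RMS,
													10.0,	//max angle between fused planes (degrees)
													1.0,	//overlap coefficient
													true,	//closest first
													0);		//no progress callback
}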
ccHObject* qFacets::createFacets(	ccPointCloud* cloud,
									CCLib::ReferenceCloudContainer& components,
									unsigned minPointsPerComponent,
									double maxEdgeLength,
									bool randomColors,
									bool& error)
{
	if (!cloud)
		return 0;

	//we create a new group to store all input CCs as 'facets'
	ccHObject* ccGroup = new ccHObject(cloud->getName()+QString(" [facets]"));
	ccGroup->setDisplay(cloud->getDisplay());
	ccGroup->setVisible(true);

	bool cloudHasNormal = cloud->hasNormals();

	//number of input components
	size_t componentCount = components.size();

	//progress notification
	ccProgressDialog pDlg(true,m_app->getMainWindow());
	pDlg.setMethodTitle("Facets creation");
	pDlg.setInfo(qPrintable(QString("Components: %1").arg(componentCount)));
	pDlg.setMaximum(static_cast<int>(componentCount));
	pDlg.show();
	QApplication::processEvents();

	//for each component
	error = false;
	while (!components.empty())
	{
		CCLib::ReferenceCloud* compIndexes = components.back();
		components.pop_back();

		//if it has enough points
		if (compIndexes && compIndexes->size() >= minPointsPerComponent)
		{
			ccPointCloud* facetCloud = cloud->partialClone(compIndexes);
			if (!facetCloud)
			{
				//not enough memory!
				error = true;
			}
			else
			{
				ccFacet* facet = ccFacet::Create(facetCloud,static_cast<PointCoordinateType>(maxEdgeLength),true);
				if (facet)
				{
					QString facetName = QString("facet %1 (rms=%2)").arg(ccGroup->getChildrenNumber()).arg(facet->getRMS());
					facet->setName(facetName);
					if (facet->getPolygon())
					{
						facet->getPolygon()->enableStippling(false);
						facet->getPolygon()->showNormals(false);
					}
					if (facet->getContour())
					{
						facet->getContour()->setGlobalScale(facetCloud->getGlobalScale());
						facet->getContour()->setGlobalShift(facetCloud->getGlobalShift());
					}

					//check the facet normal sign
					if (cloudHasNormal)
					{
						CCVector3 N = ccOctree::ComputeAverageNorm(compIndexes,cloud);
						if (N.dot(facet->getNormal()) < 0)
							facet->invertNormal();
					}

#ifdef _DEBUG
					facet->showNormalVector(true);
#endif

					//shall we colorize it with a random color?
					ccColor::Rgb col, darkCol;
					if (randomColors)
					{
						col = ccColor::Generator::Random();
						assert(c_darkColorRatio <= 1.0);
						darkCol.r = static_cast<ColorCompType>(static_cast<double>(col.r) * c_darkColorRatio);
						darkCol.g = static_cast<ColorCompType>(static_cast<double>(col.g) * c_darkColorRatio);
						darkCol.b = static_cast<ColorCompType>(static_cast<double>(col.b) * c_darkColorRatio);
					}
					else
					{
						//use normal-based HSV coloring
						CCVector3 N = facet->getNormal();
						PointCoordinateType dip, dipDir;
						ccNormalVectors::ConvertNormalToDipAndDipDir(N, dip, dipDir);
						FacetsClassifier::GenerateSubfamilyColor(col,dip,dipDir,0,1,&darkCol);
					}

					facet->setColor(col);
					if (facet->getContour())
					{
						facet->getContour()->setColor(darkCol);
						facet->getContour()->setWidth(2);
					}
					ccGroup->addChild(facet);
				}
			}
		}

		//release the component indexes in all cases (even if the component was too small)
		if (compIndexes)
			delete compIndexes;
		compIndexes = 0;

		pDlg.setValue(static_cast<int>(componentCount - components.size()));
		//QApplication::processEvents();
	}

	if (ccGroup->getChildrenNumber() == 0)
	{
		delete ccGroup;
		ccGroup = 0;
	}

	return ccGroup;
}