bool ccKdTree::convertCellIndexToRandomColor()
{
	if (!m_associatedGenericCloud || !m_associatedGenericCloud->isA(CC_TYPES::POINT_CLOUD))
		return false;

	//get leaves
	std::vector<Leaf*> leaves;
	if (!getLeaves(leaves) || leaves.empty())
		return false;

	ccPointCloud* pc = static_cast<ccPointCloud*>(m_associatedGenericCloud);
	if (!pc->resizeTheRGBTable())
		return false;

	//for each cell
	for (size_t i=0; i<leaves.size(); ++i)
	{
		colorType col[3];
		ccColor::Generator::Random(col);
		CCLib::ReferenceCloud* subset = leaves[i]->points;
		if (subset)
		{
			for (unsigned j=0; j<subset->size(); ++j)
				pc->setPointColor(subset->getPointGlobalIndex(j),col);
		}
	}

	pc->showColors(true);

	return true;
}
CCLib::ReferenceCloud* ccGenericPointCloud::getTheVisiblePoints()
{
	if (!m_visibilityArray || m_visibilityArray->currentSize() < size())
		return 0;

	unsigned count = size();
	assert(count == m_visibilityArray->currentSize());

	//we create an entity with the 'visible' vertices only
	CCLib::ReferenceCloud* rc = new CCLib::ReferenceCloud(this);
	for (unsigned i=0; i<count; ++i)
		if (m_visibilityArray->getValue(i) > 0)
			rc->addPointIndex(i);

	return rc;
}
void ccAlignDlg::dataSamplingRateChanged(double value)
{
	QString message("An error occurred");

	CC_SAMPLING_METHOD method = getSamplingMethod();
	float rate = (float)dataSamplingRate->value() / (float)dataSamplingRate->maximum();
	if (method == SPACE)
		rate = 1.0f - rate;

	dataSample->setSliderPosition((unsigned)((float)dataSample->maximum()*rate));

	switch (method)
	{
	case SPACE:
		{
			CCLib::ReferenceCloud* tmpCloud = getSampledData(); //DGM FIXME: wow! you generate a spatially sampled cloud just to display its size?!
			if (tmpCloud)
			{
				message = QString("distance units (%1 remaining points)").arg(tmpCloud->size());
				delete tmpCloud;
			}
		}
		break;
	case RANDOM:
		{
			message = QString("remaining points (%1%)").arg(rate*100.0f,0,'f',1);
		}
		break;
	case OCTREE:
		{
			CCLib::ReferenceCloud* tmpCloud = getSampledData(); //DGM FIXME: wow! you generate a spatially sampled cloud just to display its size?!
			if (tmpCloud)
			{
				message = QString("%1 remaining points").arg(tmpCloud->size());
				delete tmpCloud;
			}
		}
		break;
	default:
		{
			unsigned remaining = (unsigned)(rate * (float)dataObject->size());
			message = QString("%1 remaining points").arg(remaining);
		}
		break;
	}

	dataRemaining->setText(message);
}
int ccFastMarchingForNormsDirection::updateResolvedTable(	ccGenericPointCloud* theCloud,
															GenericChunkedArray<1,uchar>& resolved,
															NormsIndexesTableType* theNorms)
{
	if (!initialized)
		return -1;

	int count = 0;
	for (unsigned i=0; i<activeCells.size(); ++i)
	{
		DirectionCell* aCell = (DirectionCell*)theGrid[activeCells[i]];
		CCLib::ReferenceCloud* Yk = theOctree->getPointsInCell(aCell->cellCode,gridLevel,true);
		if (!Yk)
			continue;

		Yk->placeIteratorAtBegining();
		for (unsigned k=0; k<Yk->size(); ++k)
		{
			unsigned index = Yk->getCurrentPointGlobalIndex();
			resolved.setValue(index,1); //resolvedValue = 1

			const normsType& norm = theNorms->getValue(index);
			if (CCVector3::vdot(ccNormalVectors::GetNormal(norm),aCell->N) < 0.0)
			{
				PointCoordinateType newN[3];
				const PointCoordinateType* N = ccNormalVectors::GetNormal(norm);
				newN[0] = -N[0];
				newN[1] = -N[1];
				newN[2] = -N[2];
				theNorms->setValue(index,ccNormalVectors::GetNormIndex(newN));
			}

			//norm = NormalVectors::getNormIndex(aCell->N);
			//theNorms->setValue(index,&norm);

			theCloud->setPointScalarValue(index,aCell->T);
			//theCloud->setPointScalarValue(index,aCell->v);

			Yk->forwardIterator();
			++count;
		}
	}

	return count;
}
CCLib::ReferenceCloud* ccGenericPointCloud::getTheVisiblePoints() const
{
	unsigned count = size();

	if (!m_pointsVisibility || m_pointsVisibility->currentSize() != count)
	{
		ccLog::Warning("[ccGenericPointCloud::getTheVisiblePoints] No visibility table instantiated!");
		return 0;
	}
	assert(count == m_pointsVisibility->currentSize());

	//count the number of points to copy
	unsigned pointCount = 0;
	{
		for (unsigned i=0; i<count; ++i)
			if (m_pointsVisibility->getValue(i) == POINT_VISIBLE)
				++pointCount;
	}

	if (pointCount == 0)
	{
		ccLog::Warning("[ccGenericPointCloud::getTheVisiblePoints] No point in selection");
		return 0;
	}

	//we create an entity with the 'visible' vertices only
	CCLib::ReferenceCloud* rc = new CCLib::ReferenceCloud(const_cast<ccGenericPointCloud*>(this));
	if (rc->reserve(pointCount))
	{
		for (unsigned i=0; i<count; ++i)
			if (m_pointsVisibility->getValue(i) == POINT_VISIBLE)
				rc->addPointIndex(i); //can't fail (see above)
	}
	else
	{
		delete rc;
		rc = 0;
		ccLog::Error("[ccGenericPointCloud::getTheVisiblePoints] Not enough memory!");
	}

	return rc;
}
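//Usage sketch (illustrative addition, not part of the original sources): extract the
//currently visible subset of a cloud into a new ccPointCloud. It assumes 'cloud' already
//has its visibility table filled (e.g. by an interactive segmentation tool); the function
//name is hypothetical.
static ccPointCloud* ExtractVisiblePointsExample(ccPointCloud* cloud)
{
	CCLib::ReferenceCloud* visible = cloud->getTheVisiblePoints();
	if (!visible)
		return 0;

	//the reference cloud only stores indexes; partialClone copies the actual points
	ccPointCloud* segment = cloud->partialClone(visible);
	delete visible;

	if (segment)
		segment->setName(cloud->getName() + ".visible");

	return segment;
}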
bool ccKdTree::convertCellIndexToSF()
{
	if (!m_associatedGenericCloud || !m_associatedGenericCloud->isA(CC_TYPES::POINT_CLOUD))
		return false;

	//get leaves
	std::vector<Leaf*> leaves;
	if (!getLeaves(leaves) || leaves.empty())
		return false;

	ccPointCloud* pc = static_cast<ccPointCloud*>(m_associatedGenericCloud);
	const char c_defaultSFName[] = "Kd-tree indexes";
	int sfIdx = pc->getScalarFieldIndexByName(c_defaultSFName);
	if (sfIdx < 0)
		sfIdx = pc->addScalarField(c_defaultSFName);
	if (sfIdx < 0)
	{
		ccLog::Error("Not enough memory!");
		return false;
	}
	pc->setCurrentScalarField(sfIdx);

	//for each cell
	for (size_t i=0; i<leaves.size(); ++i)
	{
		CCLib::ReferenceCloud* subset = leaves[i]->points;
		if (subset)
		{
			for (unsigned j=0; j<subset->size(); ++j)
				subset->setPointScalarValue(j,(ScalarType)i);
		}
	}

	pc->getScalarField(sfIdx)->computeMinAndMax();
	pc->setCurrentDisplayedScalarField(sfIdx);
	pc->showSF(true);

	return true;
}
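//Usage sketch (illustrative addition, not part of the original sources): tag each point
//of a cloud with the index of its kd-tree cell. It assumes 'kdTree' was already built on
//'cloud' (the ccKdTree::build call and its error threshold are omitted here); the function
//name is hypothetical.
static void ShowKdTreeCellsExample(ccPointCloud* cloud, ccKdTree* kdTree)
{
	//one scalar value per cell (fall back to one random color per cell)
	if (!kdTree->convertCellIndexToSF())
		kdTree->convertCellIndexToRandomColor();

	cloud->prepareDisplayForRefresh();
}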
void ccAlignDlg::estimateDelta()
{
	ccProgressDialog pDlg(false,this);

	CCLib::ReferenceCloud* sampledData = getSampledData();

	//we have to work on a copy of the cloud in order to prevent the algorithms from modifying the original cloud.
	CCLib::ChunkedPointCloud* cloud = new CCLib::ChunkedPointCloud();
	cloud->reserve(sampledData->size());
	for (unsigned i=0; i<sampledData->size(); i++)
		cloud->addPoint(*sampledData->getPoint(i));
	cloud->enableScalarField();

	CCLib::GeometricalAnalysisTools::computeLocalDensity(cloud, &pDlg);

	unsigned nb = 0;
	float meanDensity = 0.0f;
	float meanSqrDensity = 0.0f;
	for (unsigned i=0; i<cloud->size(); i++)
	{
		float value = cloud->getPointScalarValue(i);
		if (value > ZERO_TOLERANCE)
		{
			value = 1/value;
			meanDensity += value;
			meanSqrDensity += value*value;
			nb++;
		}
	}
	meanDensity /= (float)nb;
	meanSqrDensity /= (float)nb;
	float dev = meanSqrDensity - (meanDensity*meanDensity);

	delta->setValue(meanDensity + dev);

	delete sampledData;
	delete cloud;
}
bool ccPointPairRegistrationDlg::convertToSphereCenter(CCVector3d& P, ccHObject* entity, PointCoordinateType& sphereRadius)
{
	sphereRadius = -PC_ONE;
	if (	!entity
		||	!useSphereToolButton->isChecked()
		||	!entity->isKindOf(CC_TYPES::POINT_CLOUD) ) //only works with clouds right now
	{
		//nothing to do
		return true;
	}

	//we'll now try to detect the sphere
	double searchRadius = radiusDoubleSpinBox->value();
	double maxRMSPercentage = maxRmsSpinBox->value() / 100.0;
	ccGenericPointCloud* cloud = static_cast<ccGenericPointCloud*>(entity);
	assert(cloud);

	//crop points inside a box centered on the current point
	ccBBox box;
	box.add(CCVector3::fromArray((P - CCVector3d(1,1,1)*searchRadius).u));
	box.add(CCVector3::fromArray((P + CCVector3d(1,1,1)*searchRadius).u));
	CCLib::ReferenceCloud* part = cloud->crop(box,true);

	bool success = false;
	if (part && part->size() > 16)
	{
		PointCoordinateType radius;
		CCVector3 C;
		double rms;
		ccProgressDialog pDlg(true, this);
		//first roughly search for the sphere
		if (CCLib::GeometricalAnalysisTools::detectSphereRobust(part,0.5,C,radius,rms,&pDlg,0.9))
		{
			if (radius / searchRadius < 0.5 || radius / searchRadius > 2.0)
			{
				ccLog::Warning(QString("[ccPointPairRegistrationDlg] Detected sphere radius (%1) is too far from search radius!").arg(radius));
			}
			else
			{
				//now look again (more precisely)
				{
					delete part;
					box.clear();
					box.add(C - CCVector3(1,1,1)*radius*static_cast<PointCoordinateType>(1.05)); //add 5%
					box.add(C + CCVector3(1,1,1)*radius*static_cast<PointCoordinateType>(1.05)); //add 5%
					part = cloud->crop(box,true);
					if (part && part->size() > 16)
						CCLib::GeometricalAnalysisTools::detectSphereRobust(part,0.5,C,radius,rms,&pDlg,0.99);
				}
				ccLog::Print(QString("[ccPointPairRegistrationDlg] Detected sphere radius = %1 (rms = %2)").arg(radius).arg(rms));

				if (radius / searchRadius < 0.5 || radius / searchRadius > 2.0)
				{
					ccLog::Warning("[ccPointPairRegistrationDlg] Sphere radius is too far from search radius!");
				}
				else if (rms / searchRadius >= maxRMSPercentage)
				{
					ccLog::Warning("[ccPointPairRegistrationDlg] RMS is too high!");
				}
				else
				{
					sphereRadius = radius;
					P = CCVector3d::fromArray(C.u);
					success = true;
				}
			}
		}
		else
		{
			ccLog::Warning("[ccPointPairRegistrationDlg] Failed to fit a sphere around the picked point!");
		}
	}
	else
	{
		//not enough memory? No points inside the search box?
		ccLog::Warning("[ccPointPairRegistrationDlg] Failed to crop points around the picked point?!");
	}

	if (part)
		delete part;

	return success;
}
bool ccRegistrationTools::ICP(	ccHObject* data,
								ccHObject* model,
								ccGLMatrix& transMat,
								double& finalScale,
								double& finalRMS,
								unsigned& finalPointCount,
								double minRMSDecrease,
								unsigned maxIterationCount,
								unsigned randomSamplingLimit,
								bool removeFarthestPoints,
								CCLib::ICPRegistrationTools::CONVERGENCE_TYPE method,
								bool adjustScale,
								double finalOverlapRatio/*=1.0*/,
								bool useDataSFAsWeights/*=false*/,
								bool useModelSFAsWeights/*=false*/,
								int filters/*=CCLib::ICPRegistrationTools::SKIP_NONE*/,
								int maxThreadCount/*=0*/,
								QWidget* parent/*=0*/)
{
	//progress bar
	QScopedPointer<ccProgressDialog> progressDlg;
	if (parent)
	{
		progressDlg.reset(new ccProgressDialog(false, parent));
	}

	Garbage<CCLib::GenericIndexedCloudPersist> cloudGarbage;

	//if the 'model' entity is a mesh, we need to sample points on it
	CCLib::GenericIndexedCloudPersist* modelCloud = nullptr;
	ccGenericMesh* modelMesh = nullptr;
	if (model->isKindOf(CC_TYPES::MESH))
	{
		modelMesh = ccHObjectCaster::ToGenericMesh(model);
		modelCloud = modelMesh->getAssociatedCloud();
	}
	else
	{
		modelCloud = ccHObjectCaster::ToGenericPointCloud(model);
	}

	//if the 'data' entity is a mesh, we need to sample points on it
	CCLib::GenericIndexedCloudPersist* dataCloud = nullptr;
	if (data->isKindOf(CC_TYPES::MESH))
	{
		dataCloud = CCLib::MeshSamplingTools::samplePointsOnMesh(ccHObjectCaster::ToGenericMesh(data), s_defaultSampledPointsOnDataMesh, progressDlg.data());
		if (!dataCloud)
		{
			ccLog::Error("[ICP] Failed to sample points on 'data' mesh!");
			return false;
		}
		cloudGarbage.add(dataCloud);
	}
	else
	{
		dataCloud = ccHObjectCaster::ToGenericPointCloud(data);
	}

	//we activate a temporary scalar field for registration distances computation
	CCLib::ScalarField* dataDisplayedSF = nullptr;
	int oldDataSfIdx = -1, dataSfIdx = -1;

	//if the 'data' entity is a real ccPointCloud, we can even create a proper temporary SF for registration distances
	if (data->isA(CC_TYPES::POINT_CLOUD))
	{
		ccPointCloud* pc = static_cast<ccPointCloud*>(data);
		dataDisplayedSF = pc->getCurrentDisplayedScalarField();
		oldDataSfIdx = pc->getCurrentInScalarFieldIndex();
		dataSfIdx = pc->getScalarFieldIndexByName(REGISTRATION_DISTS_SF);
		if (dataSfIdx < 0)
			dataSfIdx = pc->addScalarField(REGISTRATION_DISTS_SF);
		if (dataSfIdx >= 0)
			pc->setCurrentScalarField(dataSfIdx);
		else
		{
			ccLog::Error("[ICP] Couldn't create temporary scalar field! Not enough memory?");
			return false;
		}
	}
	else
	{
		if (!dataCloud->enableScalarField())
		{
			ccLog::Error("[ICP] Couldn't create temporary scalar field! Not enough memory?");
			return false;
		}
	}

	//add a 'safety' margin to input ratio
	static double s_overlapMarginRatio = 0.2;
	finalOverlapRatio = std::max(finalOverlapRatio, 0.01); //1% minimum
	//do we need to reduce the input point cloud (so as to be close
	//to the theoretical number of overlapping points - but not too
	//low so as we are not registered yet ;)
	if (finalOverlapRatio < 1.0 - s_overlapMarginRatio)
	{
		//DGM we can now use 'approximate' distances as SAITO algorithm is exact (but with a coarse resolution)
		//level = 7 if < 1.000.000
		//level = 8 if < 10.000.000
		//level = 9 if > 10.000.000
		int gridLevel = static_cast<int>(floor(log10(static_cast<double>(std::max(dataCloud->size(), modelCloud->size()))))) + 2;
		gridLevel = std::min(std::max(gridLevel, 7), 9);
		int result = -1;
		if (modelMesh)
		{
			CCLib::DistanceComputationTools::Cloud2MeshDistanceComputationParams c2mParams;
			c2mParams.octreeLevel = gridLevel;
			c2mParams.maxSearchDist = 0;
			c2mParams.useDistanceMap = true;
			c2mParams.signedDistances = false;
			c2mParams.flipNormals = false;
			c2mParams.multiThread = false;
			result = CCLib::DistanceComputationTools::computeCloud2MeshDistance(dataCloud, modelMesh, c2mParams, progressDlg.data());
		}
		else
		{
			result = CCLib::DistanceComputationTools::computeApproxCloud2CloudDistance(	dataCloud,
																						modelCloud,
																						gridLevel,
																						-1,
																						progressDlg.data());
		}

		if (result < 0)
		{
			ccLog::Error("Failed to determine the max (overlap) distance (not enough memory?)");
			return false;
		}

		//determine the max distance that (roughly) corresponds to the input overlap ratio
		ScalarType maxSearchDist = 0;
		{
			unsigned count = dataCloud->size();
			std::vector<ScalarType> distances;
			try
			{
				distances.resize(count);
			}
			catch (const std::bad_alloc&)
			{
				ccLog::Error("Not enough memory!");
				return false;
			}
			for (unsigned i=0; i<count; ++i)
			{
				distances[i] = dataCloud->getPointScalarValue(i);
			}
			ParallelSort(distances.begin(), distances.end());

			//now look for the max value at 'finalOverlapRatio + margin' percent
			maxSearchDist = distances[static_cast<unsigned>(std::max(1.0,count*(finalOverlapRatio+s_overlapMarginRatio)))-1];
		}

		//eventually select the points with distance below 'maxSearchDist'
		//(should roughly correspond to 'finalOverlapRatio + margin' percent)
		{
			CCLib::ReferenceCloud* refCloud = new CCLib::ReferenceCloud(dataCloud);
			cloudGarbage.add(refCloud);
			unsigned countBefore = dataCloud->size();
			unsigned baseIncrement = static_cast<unsigned>(std::max(100.0,countBefore*finalOverlapRatio*0.05));
			for (unsigned i=0; i<countBefore; ++i)
			{
				if (dataCloud->getPointScalarValue(i) <= maxSearchDist)
				{
					if (	refCloud->size() == refCloud->capacity()
						&&	!refCloud->reserve(refCloud->size() + baseIncrement) )
					{
						ccLog::Error("Not enough memory!");
						return false;
					}
					refCloud->addPointIndex(i);
				}
			}
			refCloud->resize(refCloud->size());
			dataCloud = refCloud;

			unsigned countAfter = dataCloud->size();
			double keptRatio = static_cast<double>(countAfter)/countBefore;
			ccLog::Print(QString("[ICP][Partial overlap] Selecting %1 points out of %2 (%3%) for registration").arg(countAfter).arg(countBefore).arg(static_cast<int>(100*keptRatio)));

			//update the relative 'final overlap' ratio
			finalOverlapRatio /= keptRatio;
		}
	}

	//weights
	CCLib::ScalarField* modelWeights = nullptr;
	CCLib::ScalarField* dataWeights = nullptr;
	{
		if (!modelMesh && useModelSFAsWeights)
		{
			if (modelCloud == dynamic_cast<CCLib::GenericIndexedCloudPersist*>(model) && model->isA(CC_TYPES::POINT_CLOUD))
			{
				ccPointCloud* pc = static_cast<ccPointCloud*>(model);
				modelWeights = pc->getCurrentDisplayedScalarField();
				if (!modelWeights)
					ccLog::Warning("[ICP] 'useModelSFAsWeights' is true but model has no displayed scalar field!");
			}
			else
			{
				ccLog::Warning("[ICP] 'useModelSFAsWeights' is true but only point cloud scalar fields can be used as weights!");
			}
		}

		if (useDataSFAsWeights)
		{
			if (!dataDisplayedSF)
			{
				if (dataCloud == ccHObjectCaster::ToPointCloud(data))
					ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but data has no displayed scalar field!");
				else
					ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but only point cloud scalar fields can be used as weights!");
			}
			else
			{
				dataWeights = dataDisplayedSF;
			}
		}
	}

	CCLib::ICPRegistrationTools::RESULT_TYPE result;
	CCLib::PointProjectionTools::Transformation transform;
	CCLib::ICPRegistrationTools::Parameters params;
	{
		params.convType = method;
		params.minRMSDecrease = minRMSDecrease;
		params.nbMaxIterations = maxIterationCount;
		params.adjustScale = adjustScale;
		params.filterOutFarthestPoints = removeFarthestPoints;
		params.samplingLimit = randomSamplingLimit;
		params.finalOverlapRatio = finalOverlapRatio;
		params.modelWeights = modelWeights;
		params.dataWeights = dataWeights;
		params.transformationFilters = filters;
		params.maxThreadCount = maxThreadCount;
	}

	result = CCLib::ICPRegistrationTools::Register(	modelCloud,
													modelMesh,
													dataCloud,
													params,
													transform,
													finalRMS,
													finalPointCount,
													static_cast<CCLib::GenericProgressCallback*>(progressDlg.data()));

	if (result >= CCLib::ICPRegistrationTools::ICP_ERROR)
	{
		ccLog::Error("Registration failed: an error occurred (code %i)",result);
	}
	else if (result == CCLib::ICPRegistrationTools::ICP_APPLY_TRANSFO)
	{
		transMat = FromCCLibMatrix<PointCoordinateType, float>(transform.R, transform.T, transform.s);
		finalScale = transform.s;
	}

	//remove temporary SF (if any)
	if (dataSfIdx >= 0)
	{
		assert(data->isA(CC_TYPES::POINT_CLOUD));
		ccPointCloud* pc = static_cast<ccPointCloud*>(data);
		pc->setCurrentScalarField(oldDataSfIdx);
		pc->deleteScalarField(dataSfIdx);
	}

	return (result < CCLib::ICPRegistrationTools::ICP_ERROR);
}
CCLib::ReferenceCloud* qHPR::removeHiddenPoints(CCLib::GenericIndexedCloudPersist* theCloud, const CCVector3d& viewPoint, double fParam)
{
	assert(theCloud);

	unsigned nbPoints = theCloud->size();
	if (nbPoints == 0)
		return 0;

	//less than 4 points? no need for calculation, we return the whole cloud
	if (nbPoints < 4)
	{
		CCLib::ReferenceCloud* visiblePoints = new CCLib::ReferenceCloud(theCloud);
		if (!visiblePoints->addPointIndex(0,nbPoints)) //well even for less than 4 points we never know ;)
		{
			//not enough memory!
			delete visiblePoints;
			visiblePoints = 0;
		}
		return visiblePoints;
	}

	double maxRadius = 0;

	//convert point cloud to an array of double triplets (for qHull)
	coordT* pt_array = new coordT[(nbPoints+1)*3];
	{
		coordT* _pt_array = pt_array;

		for (unsigned i=0; i<nbPoints; ++i)
		{
			CCVector3d P = CCVector3d::fromArray(theCloud->getPoint(i)->u) - viewPoint;
			*_pt_array++ = static_cast<coordT>(P.x);
			*_pt_array++ = static_cast<coordT>(P.y);
			*_pt_array++ = static_cast<coordT>(P.z);

			//we keep track of the highest 'radius'
			double r2 = P.norm2();
			if (maxRadius < r2)
				maxRadius = r2;
		}

		//we add the view point (Cf. HPR)
		*_pt_array++ = 0;
		*_pt_array++ = 0;
		*_pt_array++ = 0;

		maxRadius = sqrt(maxRadius);
	}

	//apply spherical flipping
	{
		maxRadius *= pow(10.0,fParam) * 2;

		coordT* _pt_array = pt_array;
		for (unsigned i=0; i<nbPoints; ++i)
		{
			CCVector3d P = CCVector3d::fromArray(theCloud->getPoint(i)->u) - viewPoint;

			double r = (maxRadius/P.norm()) - 1.0;
			*_pt_array++ *= r;
			*_pt_array++ *= r;
			*_pt_array++ *= r;
		}
	}

	//array to flag points on the convex hull
	std::vector<bool> pointBelongsToCvxHull;

	static char qHullCommand[] = "qhull QJ Qci";
	if (!qh_new_qhull(3,nbPoints+1,pt_array,False,qHullCommand,0,stderr))
	{
		try
		{
			pointBelongsToCvxHull.resize(nbPoints+1,false);
		}
		catch (const std::bad_alloc&)
		{
			//not enough memory!
			delete[] pt_array;
			return 0;
		}

		vertexT *vertex = 0,**vertexp = 0;
		facetT *facet = 0;

		FORALLfacets
		{
			//if (!facet->simplicial)
			//	error("convhulln: non-simplicial facet"); // should never happen with QJ

			setT* vertices = qh_facet3vertex(facet);
			FOREACHvertex_(vertices)
			{
				pointBelongsToCvxHull[qh_pointid(vertex->point)] = true;
			}
			qh_settempfree(&vertices);
		}
	}

	delete[] pt_array;
	pt_array = 0;

	qh_freeqhull(!qh_ALL); //free long memory
	int curlong, totlong;
	qh_memfreeshort(&curlong, &totlong); //free short memory and memory allocator

	if (!pointBelongsToCvxHull.empty())
	{
		//compute the number of points belonging to the convex hull
		unsigned cvxHullSize = 0;
		{
			for (unsigned i=0; i<nbPoints; ++i)
				if (pointBelongsToCvxHull[i])
					++cvxHullSize;
		}

		CCLib::ReferenceCloud* visiblePoints = new CCLib::ReferenceCloud(theCloud);
		if (cvxHullSize != 0 && visiblePoints->reserve(cvxHullSize))
		{
			for (unsigned i=0; i<nbPoints; ++i)
				if (pointBelongsToCvxHull[i])
					visiblePoints->addPointIndex(i); //can't fail, see above

			return visiblePoints;
		}
		else //not enough memory
		{
			delete visiblePoints;
			visiblePoints = 0;
		}
	}

	return 0;
}
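//Usage sketch (illustrative addition, not part of the original sources): apply the HPR
//operator directly to a cloud and keep only the visible points. It is written as if it were
//a hypothetical helper of the qHPR plugin so that removeHiddenPoints() is in scope;
//fParam = 3.5 mirrors the value used by qHPR::doAction below.
ccPointCloud* qHPR::keepVisiblePointsExample(ccPointCloud* cloud, const CCVector3d& viewPoint)
{
	CCLib::ReferenceCloud* visible = removeHiddenPoints(cloud, viewPoint, 3.5);
	if (!visible)
		return 0;

	//copy the visible subset into a new cloud
	ccPointCloud* result = cloud->partialClone(visible);
	delete visible;

	return result;
}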
void qHPR::doAction()
{
	assert(m_app);
	if (!m_app)
		return;

	const ccHObject::Container& selectedEntities = m_app->getSelectedEntities();
	size_t selNum = selectedEntities.size();
	if (	selNum != 1
		||	!selectedEntities.front()->isA(CC_TYPES::POINT_CLOUD))
	{
		m_app->dispToConsole("Select only one cloud!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	ccPointCloud* cloud = static_cast<ccPointCloud*>(selectedEntities[0]);

	ccGLWindow* win = m_app->getActiveGLWindow();
	if (!win)
	{
		m_app->dispToConsole("No active window!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	//display parameters
	const ccViewportParameters& params = win->getViewportParameters();
	if (!params.perspectiveView)
	{
		m_app->dispToConsole("Perspective mode only!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	ccHprDlg dlg(m_app->getMainWindow());
	if (!dlg.exec())
		return;

	//progress dialog
	ccProgressDialog progressCb(false,m_app->getMainWindow());

	//unique parameter: the octree subdivision level
	int octreeLevel = dlg.octreeLevelSpinBox->value();
	assert(octreeLevel >= 0 && octreeLevel <= CCLib::DgmOctree::MAX_OCTREE_LEVEL);

	//compute octree if cloud hasn't any
	ccOctree::Shared theOctree = cloud->getOctree();
	if (!theOctree)
	{
		theOctree = cloud->computeOctree(&progressCb);
		if (theOctree && cloud->getParent())
		{
			m_app->addToDB(cloud->getOctreeProxy());
		}
	}

	if (!theOctree)
	{
		m_app->dispToConsole("Couldn't compute octree!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	CCVector3d viewPoint = params.cameraCenter;
	if (params.objectCenteredView)
	{
		CCVector3d PC = params.cameraCenter - params.pivotPoint;
		params.viewMat.inverse().apply(PC);
		viewPoint = params.pivotPoint + PC;
	}

	//HPR
	CCLib::ReferenceCloud* visibleCells = 0;
	{
		QElapsedTimer eTimer;
		eTimer.start();

		CCLib::ReferenceCloud* theCellCenters = CCLib::CloudSamplingTools::subsampleCloudWithOctreeAtLevel(	cloud,
																											static_cast<unsigned char>(octreeLevel),
																											CCLib::CloudSamplingTools::NEAREST_POINT_TO_CELL_CENTER,
																											&progressCb,
																											theOctree.data());
		if (!theCellCenters)
		{
			m_app->dispToConsole("Error while simplifying point cloud with octree!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			return;
		}

		visibleCells = removeHiddenPoints(theCellCenters,viewPoint,3.5);

		m_app->dispToConsole(QString("[HPR] Cells: %1 - Time: %2 s").arg(theCellCenters->size()).arg(eTimer.elapsed()/1.0e3));

		//warning: after this, visibleCells can't be used anymore as a
		//normal cloud (as its 'associated cloud' has been deleted).
		//Only its indexes are valid! (they correspond to octree cells)
		delete theCellCenters;
		theCellCenters = 0;
	}

	if (visibleCells)
	{
		//DGM: we generate a new cloud now, instead of playing with the points visibility! (too confusing for the user)
		/*if (!cloud->isVisibilityTableInstantiated() && !cloud->resetVisibilityArray())
		{
			m_app->dispToConsole("Visibility array allocation failed! (Not enough memory?)",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			return;
		}

		ccPointCloud::VisibilityTableType* pointsVisibility = cloud->getTheVisibilityArray();
		assert(pointsVisibility);
		pointsVisibility->fill(POINT_HIDDEN);
		//*/

		CCLib::ReferenceCloud visiblePoints(theOctree->associatedCloud());

		unsigned visibleCellsCount = visibleCells->size();

		CCLib::DgmOctree::cellIndexesContainer cellIndexes;
		if (!theOctree->getCellIndexes(static_cast<unsigned char>(octreeLevel), cellIndexes))
		{
			m_app->dispToConsole("Couldn't fetch the list of octree cell indexes! (Not enough memory?)",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			delete visibleCells;
			return;
		}

		for (unsigned i=0; i<visibleCellsCount; ++i)
		{
			//cell index
			unsigned index = visibleCells->getPointGlobalIndex(i);

			//points in this cell...
			CCLib::ReferenceCloud Yk(theOctree->associatedCloud());
			theOctree->getPointsInCellByCellIndex(&Yk,cellIndexes[index],static_cast<unsigned char>(octreeLevel));
			//...are all visible
			/*unsigned count = Yk.size();
			for (unsigned j=0; j<count; ++j)
				pointsVisibility->setValue(Yk.getPointGlobalIndex(j),POINT_VISIBLE);
			//*/
			if (!visiblePoints.add(Yk))
			{
				m_app->dispToConsole("Not enough memory!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
				delete visibleCells;
				return;
			}
		}

		delete visibleCells;
		visibleCells = 0;

		m_app->dispToConsole(QString("[HPR] Visible points: %1").arg(visiblePoints.size()));

		if (visiblePoints.size() == cloud->size())
		{
			m_app->dispToConsole("No points were removed!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		}
		else
		{
			//create cloud from visibility selection
			ccPointCloud* newCloud = cloud->partialClone(&visiblePoints);
			if (newCloud)
			{
				newCloud->setDisplay(newCloud->getDisplay());
				newCloud->setVisible(true);
				newCloud->setName(cloud->getName()+QString(".visible_points"));
				cloud->setEnabled(false);

				//add associated viewport object
				cc2DViewportObject* viewportObject = new cc2DViewportObject(QString("Viewport"));
				viewportObject->setParameters(params);
				newCloud->addChild(viewportObject);

				m_app->addToDB(newCloud);
				newCloud->redrawDisplay();
			}
			else
			{
				m_app->dispToConsole("Not enough memory!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			}
		}
	}

	//currently selected entities appearance may have changed!
	m_app->refreshAll();
}
bool ccKdTreeForFacetExtraction::FuseCells(	ccKdTree* kdTree,
											double maxError,
											CCLib::DistanceComputationTools::ERROR_MEASURES errorMeasure,
											double maxAngle_deg,
											PointCoordinateType overlapCoef/*=1*/,
											bool closestFirst/*=true*/,
											CCLib::GenericProgressCallback* progressCb/*=0*/)
{
	if (!kdTree)
		return false;

	ccGenericPointCloud* associatedGenericCloud = kdTree->associatedGenericCloud();
	if (!associatedGenericCloud || !associatedGenericCloud->isA(CC_TYPES::POINT_CLOUD) || maxError < 0.0)
		return false;

	//get leaves
	std::vector<ccKdTree::Leaf*> leaves;
	if (!kdTree->getLeaves(leaves) || leaves.empty())
		return false;

	//progress notification
	CCLib::NormalizedProgress nProgress(progressCb, static_cast<unsigned>(leaves.size()));
	if (progressCb)
	{
		progressCb->update(0);
		if (progressCb->textCanBeEdited())
		{
			progressCb->setMethodTitle("Fuse Kd-tree cells");
			progressCb->setInfo(qPrintable(QString("Cells: %1\nMax error: %2").arg(leaves.size()).arg(maxError)));
		}
		progressCb->start();
	}

	ccPointCloud* pc = static_cast<ccPointCloud*>(associatedGenericCloud);

	//sort cells based on their population size (we start by the biggest ones)
	SortAlgo(leaves.begin(), leaves.end(), DescendingLeafSizeComparison);

	//set all 'userData' to -1 (i.e. unfused cells)
	{
		for (size_t i=0; i<leaves.size(); ++i)
		{
			leaves[i]->userData = -1;
			//check by the way that the plane normal is unit!
			assert(fabs(static_cast<double>(CCVector3(leaves[i]->planeEq).norm2()) - 1.0) < 1.0e-6);
		}
	}

	//cosine of the max angle between fused 'planes'
	const double c_minCosNormAngle = cos(maxAngle_deg * CC_DEG_TO_RAD);

	//fuse all cells, starting from the ones with the best error
	const int unvisitedNeighborValue = -1;
	bool cancelled = false;
	int macroIndex = 1; //starts at 1 (0 is reserved for cells already above the max error)
	{
		for (size_t i=0; i<leaves.size(); ++i)
		{
			ccKdTree::Leaf* currentCell = leaves[i];
			if (currentCell->error >= maxError)
				currentCell->userData = 0; //0 = special group for cells already above the user defined threshold!

			//already fused?
			if (currentCell->userData != -1)
			{
				if (progressCb && !nProgress.oneStep()) //process canceled by user
				{
					cancelled = true;
					break;
				}
				continue;
			}

			//we create a new "macro cell" index
			currentCell->userData = macroIndex++;

			//we init the current set of 'fused' points with the cell's points
			CCLib::ReferenceCloud* currentPointSet = currentCell->points;
			//get current fused set centroid and normal
			CCVector3 currentCentroid = *CCLib::Neighbourhood(currentPointSet).getGravityCenter();
			CCVector3 currentNormal(currentCell->planeEq);

			//visited neighbors
			ccKdTree::LeafSet visitedNeighbors;
			//set of candidates
			std::list<Candidate> candidates;

			//we are going to iteratively look for neighbor cells that could be fused to this one
			ccKdTree::LeafVector cellsToTest;
			cellsToTest.push_back(currentCell);

			if (progressCb && !nProgress.oneStep()) //process canceled by user
			{
				cancelled = true;
				break;
			}

			while (!cellsToTest.empty() || !candidates.empty())
			{
				//get all neighbors around the 'waiting' cell(s)
				if (!cellsToTest.empty())
				{
					ccKdTree::LeafSet neighbors;
					while (!cellsToTest.empty())
					{
						if (!kdTree->getNeighborLeaves(cellsToTest.back(), neighbors, &unvisitedNeighborValue)) //we only consider unvisited cells!
						{
							//an error occurred
							return false;
						}
						cellsToTest.pop_back();
					}

					//add those (new) neighbors to the 'visitedNeighbors' set
					//and to the candidates set by the way if they are not yet there
					for (ccKdTree::LeafSet::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
					{
						ccKdTree::Leaf* neighbor = *it;
						std::pair<ccKdTree::LeafSet::iterator,bool> ret = visitedNeighbors.insert(neighbor);
						//neighbour not already in the set?
						if (ret.second)
						{
							//we create the corresponding candidate
							try
							{
								candidates.push_back(Candidate(neighbor));
							}
							catch (const std::bad_alloc&)
							{
								//not enough memory!
								ccLog::Warning("[ccKdTreeForFacetExtraction] Not enough memory!");
								return false;
							}
						}
					}
				}

				//are there remaining candidates?
				if (!candidates.empty())
				{
					//update the set of candidates
					if (closestFirst && candidates.size() > 1)
					{
						for (std::list<Candidate>::iterator it = candidates.begin(); it != candidates.end(); ++it)
							it->dist = (it->centroid-currentCentroid).norm2();

						//sort candidates by their distance
						candidates.sort(CandidateDistAscendingComparison);
					}

					//we will keep track of the best fused 'couple' at each pass
					std::list<Candidate>::iterator bestIt = candidates.end();
					CCLib::ReferenceCloud* bestFused = 0;
					CCVector3 bestNormal(0,0,0);
					double bestError = -1.0;

					unsigned skipCount = 0;
					for (std::list<Candidate>::iterator it = candidates.begin(); it != candidates.end(); /*++it*/)
					{
						assert(it->leaf && it->leaf->points);
						assert(currentPointSet->getAssociatedCloud() == it->leaf->points->getAssociatedCloud());

						//if the leaf orientation is too different
						if (fabs(CCVector3(it->leaf->planeEq).dot(currentNormal)) < c_minCosNormAngle)
						{
							it = candidates.erase(it);
							//++it;
							//++skipCount;
							continue;
						}

						//compute the minimum distance between the candidate centroid and the 'currentPointSet'
						PointCoordinateType minDistToMainSet = 0.0;
						{
							for (unsigned j=0; j<currentPointSet->size(); ++j)
							{
								const CCVector3* P = currentPointSet->getPoint(j);
								PointCoordinateType d2 = (*P-it->centroid).norm2();
								if (d2 < minDistToMainSet || j == 0)
									minDistToMainSet = d2;
							}
							minDistToMainSet = sqrt(minDistToMainSet);
						}

						//if the leaf is too far
						if (it->radius < minDistToMainSet / overlapCoef)
						{
							++it;
							++skipCount;
							continue;
						}

						//fuse the main set with the current candidate
						CCLib::ReferenceCloud* fused = new CCLib::ReferenceCloud(*currentPointSet);
						if (!fused->add(*(it->leaf->points)))
						{
							//not enough memory!
							ccLog::Warning("[ccKdTreeForFacetExtraction] Not enough memory!");
							delete fused;
							if (currentPointSet != currentCell->points)
								delete currentPointSet;
							return false;
						}

						//fit a plane and estimate the resulting error
						double error = -1.0;
						const PointCoordinateType* planeEquation = CCLib::Neighbourhood(fused).getLSPlane();
						if (planeEquation)
							error = CCLib::DistanceComputationTools::ComputeCloud2PlaneDistance(fused, planeEquation, errorMeasure);

						if (error < 0.0 || error > maxError)
						{
							//candidate is rejected
							it = candidates.erase(it);
						}
						else
						{
							//otherwise we keep track of the best one!
							if (bestError < 0.0 || error < bestError)
							{
								bestIt = it;
								bestError = error;
								if (bestFused)
									delete bestFused;
								bestFused = fused;
								bestNormal = CCVector3(planeEquation);
								fused = 0;

								if (closestFirst)
									break; //if we have found a good candidate, we stop here (closest first ;)
							}
							++it;
						}

						if (fused)
						{
							delete fused;
							fused = 0;
						}
					}

					//we have a (best) candidate for this pass?
					if (bestIt != candidates.end())
					{
						assert(bestFused && bestError >= 0.0);
						if (currentPointSet != currentCell->points)
							delete currentPointSet;
						currentPointSet = bestFused;
						{
							//update infos
							CCLib::Neighbourhood N(currentPointSet);
							//currentCentroid = *N.getGravityCenter(); //if we update it, the search will naturally shift along one dimension!
							//currentNormal = bestNormal; //same thing here for normals
						}

						bestIt->leaf->userData = currentCell->userData;
						//bestIt->leaf->userData = macroIndex++; //FIXME TEST

						//we will test this cell's neighbors as well
						cellsToTest.push_back(bestIt->leaf);

						if (progressCb && !nProgress.oneStep()) //process canceled by user
						{
							//premature end!
							candidates.clear();
							cellsToTest.clear();
							cancelled = true;
							break;
						}
						QApplication::processEvents();

						//we also remove it from the candidates list
						candidates.erase(bestIt);
					}

					if (skipCount == candidates.size() && cellsToTest.empty())
					{
						//only far leaves remain...
						candidates.clear();
					}
				}

			} //no more candidates or cells to test

			//end of the fusion process for the current leaf
			if (currentPointSet != currentCell->points)
				delete currentPointSet;
			currentPointSet = 0;

			if (cancelled)
				break;
		}
	}

	//convert fused indexes to SF
	if (!cancelled)
	{
		pc->enableScalarField();

		for (size_t i=0; i<leaves.size(); ++i)
		{
			CCLib::ReferenceCloud* subset = leaves[i]->points;
			if (subset)
			{
				ScalarType scalar = (ScalarType)leaves[i]->userData;
				if (leaves[i]->userData <= 0) //for unfused cells, we create new individual groups
					scalar = static_cast<ScalarType>(macroIndex++);
					//scalar = NAN_VALUE; //FIXME TEST
				for (unsigned j=0; j<subset->size(); ++j)
					subset->setPointScalarValue(j,scalar);
			}
		}

		//pc->setCurrentDisplayedScalarField(sfIdx);
	}

	return !cancelled;
}
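//Usage sketch (illustrative addition, not part of the original sources): group the cells of
//a kd-tree into quasi-planar sets. It assumes 'kdTree' was already built on a ccPointCloud
//(the ccKdTree::build call is omitted); the threshold values and the function name are
//purely illustrative.
static bool FuseKdTreeCellsExample(ccKdTree* kdTree)
{
	bool fused = ccKdTreeForFacetExtraction::FuseCells(	kdTree,
														0.05,	//max error after fusion (in the units of the chosen error measure)
														CCLib::DistanceComputationTools::RMS,
														10.0,	//max angle between cell normals (degrees)
														1.0);	//overlap coefficient

	//on success, each point of the associated cloud now holds the index of its
	//fused group in the cloud's active scalar field (see the end of FuseCells above)
	return fused;
}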
ccHObject* qFacets::createFacets(	ccPointCloud* cloud,
									CCLib::ReferenceCloudContainer& components,
									unsigned minPointsPerComponent,
									double maxEdgeLength,
									bool randomColors,
									bool& error)
{
	if (!cloud)
		return 0;

	//we create a new group to store all input CCs as 'facets'
	ccHObject* ccGroup = new ccHObject(cloud->getName()+QString(" [facets]"));
	ccGroup->setDisplay(cloud->getDisplay());
	ccGroup->setVisible(true);

	bool cloudHasNormal = cloud->hasNormals();

	//number of input components
	size_t componentCount = components.size();

	//progress notification
	ccProgressDialog pDlg(true,m_app->getMainWindow());
	pDlg.setMethodTitle("Facets creation");
	pDlg.setInfo(qPrintable(QString("Components: %1").arg(componentCount)));
	pDlg.setMaximum(static_cast<int>(componentCount));
	pDlg.show();
	QApplication::processEvents();

	//for each component
	error = false;
	while (!components.empty())
	{
		CCLib::ReferenceCloud* compIndexes = components.back();
		components.pop_back();

		//if it has enough points
		if (compIndexes && compIndexes->size() >= minPointsPerComponent)
		{
			ccPointCloud* facetCloud = cloud->partialClone(compIndexes);
			if (!facetCloud)
			{
				//not enough memory!
				error = true;
				delete facetCloud;
				facetCloud = 0;
			}
			else
			{
				ccFacet* facet = ccFacet::Create(facetCloud,static_cast<PointCoordinateType>(maxEdgeLength),true);
				if (facet)
				{
					QString facetName = QString("facet %1 (rms=%2)").arg(ccGroup->getChildrenNumber()).arg(facet->getRMS());
					facet->setName(facetName);
					if (facet->getPolygon())
					{
						facet->getPolygon()->enableStippling(false);
						facet->getPolygon()->showNormals(false);
					}
					if (facet->getContour())
					{
						facet->getContour()->setGlobalScale(facetCloud->getGlobalScale());
						facet->getContour()->setGlobalShift(facetCloud->getGlobalShift());
					}

					//check the facet normal sign
					if (cloudHasNormal)
					{
						CCVector3 N = ccOctree::ComputeAverageNorm(compIndexes,cloud);
						if (N.dot(facet->getNormal()) < 0)
							facet->invertNormal();
					}

#ifdef _DEBUG
					facet->showNormalVector(true);
#endif

					//shall we colorize it with a random color?
					ccColor::Rgb col, darkCol;
					if (randomColors)
					{
						col = ccColor::Generator::Random();
						assert(c_darkColorRatio <= 1.0);
						darkCol.r = static_cast<ColorCompType>(static_cast<double>(col.r) * c_darkColorRatio);
						darkCol.g = static_cast<ColorCompType>(static_cast<double>(col.g) * c_darkColorRatio);
						darkCol.b = static_cast<ColorCompType>(static_cast<double>(col.b) * c_darkColorRatio);
					}
					else
					{
						//use normal-based HSV coloring
						CCVector3 N = facet->getNormal();
						PointCoordinateType dip, dipDir;
						ccNormalVectors::ConvertNormalToDipAndDipDir(N, dip, dipDir);
						FacetsClassifier::GenerateSubfamilyColor(col,dip,dipDir,0,1,&darkCol);
					}
					facet->setColor(col);
					if (facet->getContour())
					{
						facet->getContour()->setColor(darkCol);
						facet->getContour()->setWidth(2);
					}
					ccGroup->addChild(facet);
				}
			}

			if (compIndexes)
				delete compIndexes;
			compIndexes = 0;
		}

		pDlg.setValue(static_cast<int>(componentCount-components.size()));
		//QApplication::processEvents();
	}

	if (ccGroup->getChildrenNumber() == 0)
	{
		delete ccGroup;
		ccGroup = 0;
	}

	return ccGroup;
}
int Cropper::compute()
{
	ccHObject::Container outcrops = vombat::theInstance()->getAllObjectsSelectedBySPCDti(&spc::VirtualOutcrop::Type);

	if (outcrops.size() > 1)
	{
		LOG(INFO) << "please select only one virtual outcrop on which to operate";
		return 1;
	}
	else if (outcrops.size() == 1)
	{
		m_root_outcrop = dynamic_cast<ccVirtualOutcrop*>(outcrops.at(0));
	}
	else
		m_root_outcrop = new ccVirtualOutcrop();

	ccHObject::Container selections = m_dialog->getSelections();
	// ccHObject::Container clouds = m_dialog->getClouds();
	ccHObject::Container croppables = m_dialog->getCroppables();

	LOG(INFO) << "found " << selections.size() << " selections to be processed";
	// LOG(INFO) << "Found " << clouds.size() << " point clouds to be analyzed";
	LOG(INFO) << "Found " << croppables.size() << " polylines";

	// if (m_dialog->generateRegions()) // first create selections out of regions

	for (ccHObject* obj : selections)
	{
		ccPlanarSelection* sel = dynamic_cast<ccPlanarSelection*>(obj);

		for (ccHObject* to_crop : croppables)
		{
			if (to_crop->isA(CC_TYPES::POLY_LINE))
			{
				// if (to_crop->isA(CC_TYPES::POINT_CLOUD)) {
				ccPolyline* pline = ccHObjectCaster::ToPolyline(to_crop);
				ccPointCloud* vertices = dynamic_cast<ccPointCloud*>(pline->getAssociatedCloud());

				ccPointCloud* cropped_vertices = sel->crop(vertices);
				if (cropped_vertices)
				{
					ccPolyline* cropped_pline = new ccPolyline(cropped_vertices);
					cropped_pline->addPointIndex(0, cropped_vertices->size() - 1);
					cropped_pline->setVisible(true);
					// if (m_dialog->cropStrataTraces())
					sel->addChild(cropped_pline);
				}
			}
			else if (to_crop->isA(CC_TYPES::POINT_CLOUD))
			{
				LOG(INFO) << "cropping point cloud with name " << to_crop->getName().toStdString();
				ccPointCloud* cloud = ccHObjectCaster::ToPointCloud(to_crop);

				spc::PointCloudBase::Ptr asspc = spcCCPointCloud::fromccPointCloud(cloud);

				spc::SelectionExtractor<Eigen::Vector3f, int> ext;
				ext.setInputSet(asspc);
				ext.setSelection(sel->getSPCElement<spc::SelectionBase<Eigen::Vector3f> >());
				ext.compute();

				std::vector<int> inside = ext.getInsideIds();
				if (inside.size() == 0)
				{
					LOG(WARNING) << "the selection did not find anything inside it. Skipping";
					continue;
				}

				CCLib::ReferenceCloud* ref = new CCLib::ReferenceCloud(cloud);
				for (int i : inside)
				{
					ref->addPointIndex(i);
				}

				ccPointCloud* outcloud = cloud->partialClone(ref);

				LOG(INFO) << "done with cropping, calling add child on object: " << sel->getName().toStdString();
				sel->addChild(outcloud);
				newEntity(outcloud);
			}
		}
	}

	return 1;
}
CCLib::ReferenceCloud* qHPR::removeHiddenPoints(CCLib::GenericIndexedCloudPersist* theCloud, float viewPoint[], float fParam)
{
	assert(theCloud);

	unsigned i,nbPoints = theCloud->size();
	if (nbPoints == 0)
		return 0;

	CCLib::ReferenceCloud* newCloud = new CCLib::ReferenceCloud(theCloud);

	//less than 4 points? no need for calculation, we return the whole cloud
	if (nbPoints < 4)
	{
		if (!newCloud->reserve(nbPoints)) //well, we never know ;)
		{
			//not enough memory!
			delete newCloud;
			return 0;
		}
		newCloud->addPointIndex(0,nbPoints);
		return newCloud;
	}

	//view point
	coordT Cx = viewPoint[0];
	coordT Cy = viewPoint[1];
	coordT Cz = viewPoint[2];

	float* radius = new float[nbPoints];
	if (!radius)
	{
		//not enough memory!
		delete newCloud;
		return 0;
	}
	float r,maxRadius = 0.0;

	//table of points
	coordT* pt_array = new coordT[(nbPoints+1)*3];
	coordT* _pt_array = pt_array;
	theCloud->placeIteratorAtBegining();

//#define BACKUP_PROJECTED_CLOUDS
#ifdef BACKUP_PROJECTED_CLOUDS
	FILE* fp = fopen("output_centered.asc","wt");
#endif
	double x,y,z;
	for (i=0; i<nbPoints; ++i)
	{
		const CCVector3* P = theCloud->getNextPoint();
		*(_pt_array++) = x = coordT(P->x)-Cx;
		*(_pt_array++) = y = coordT(P->y)-Cy;
		*(_pt_array++) = z = coordT(P->z)-Cz;

		//we pre-compute the radius ...
		r = (float)sqrt(x*x+y*y+z*z);
		//in order to determine the max radius
		if (maxRadius < r)
			maxRadius = r;
		radius[i] = r;

#ifdef BACKUP_PROJECTED_CLOUDS
		fprintf(fp,"%f %f %f %f\n",x,y,z,r);
#endif
	}

	//we add the view point (Cf. HPR)
	*(_pt_array++) = 0.0;
	*(_pt_array++) = 0.0;
	*(_pt_array++) = 0.0;
#ifdef BACKUP_PROJECTED_CLOUDS
	fprintf(fp,"%f %f %f %f\n",0,0,0,0);
	fclose(fp);
#endif

	maxRadius *= 2.0f*pow(10.0f,fParam);

	_pt_array = pt_array;
#ifdef BACKUP_PROJECTED_CLOUDS
	fp = fopen("output_transformed.asc","wt");
#endif
	for (i=0; i<nbPoints; ++i)
	{
		//spherical flipping
		r = maxRadius/radius[i]-1.0f;
#ifndef BACKUP_PROJECTED_CLOUDS
		*(_pt_array++) *= double(r);
		*(_pt_array++) *= double(r);
		*(_pt_array++) *= double(r);
#else
		x = *_pt_array * double(r);
		*(_pt_array++) = x;
		y = *_pt_array * double(r);
		*(_pt_array++) = y;
		z = *_pt_array * double(r);
		*(_pt_array++) = z;
		fprintf(fp,"%f %f %f %f\n",x,y,z,r);
#endif
	}
#ifdef BACKUP_PROJECTED_CLOUDS
	fclose(fp);
#endif

	//we re-use the radius to record if each point belongs to the convex hull
	//delete[] radius;
	//uchar* pointBelongsToCvxHull = new uchar[nbPoints];
	uchar* pointBelongsToCvxHull = (uchar*)radius;
	memset(pointBelongsToCvxHull,0,sizeof(uchar)*(nbPoints+1));

	if (!qh_new_qhull(3,nbPoints+1,pt_array,False,(char*)"qhull QJ s Qci Tcv",0,stderr))
	{
		vertexT *vertex,**vertexp;
		facetT *facet;
		setT *vertices;
		int j, i = 0;

		FORALLfacets
		{
			/*if (!facet->simplicial)
				error("convhulln: non-simplicial facet"); // should never happen with QJ
			*/
			j = 0;
			vertices = qh_facet3vertex(facet);
			FOREACHvertex_(vertices)
			{
				pointBelongsToCvxHull[qh_pointid(vertex->point)] = 1;
				++j;
			}
			qh_settempfree(&vertices);

			if (j < 3)
				printf("Warning, facet %d only has %d vertices\n",i,j); //likewise but less fatal

			i++;
		}
	}