bool ccRegistrationTools::ICP(	ccHObject* data,
								ccHObject* model,
								ccGLMatrix& transMat,
								double& finalScale,
								double& finalRMS,
								unsigned& finalPointCount,
								double minRMSDecrease,
								unsigned maxIterationCount,
								unsigned randomSamplingLimit,
								bool removeFarthestPoints,
								CCLib::ICPRegistrationTools::CONVERGENCE_TYPE method,
								bool adjustScale,
								double finalOverlapRatio/*=1.0*/,
								bool useDataSFAsWeights/*=false*/,
								bool useModelSFAsWeights/*=false*/,
								int filters/*=CCLib::ICPRegistrationTools::SKIP_NONE*/,
								int maxThreadCount/*=0*/,
								QWidget* parent/*=0*/)
{
	//progress bar
	QScopedPointer<ccProgressDialog> progressDlg;
	if (parent)
	{
		progressDlg.reset(new ccProgressDialog(false, parent));
	}

	Garbage<CCLib::GenericIndexedCloudPersist> cloudGarbage;

	//if the 'model' entity is a mesh, we need to sample points on it
	CCLib::GenericIndexedCloudPersist* modelCloud = nullptr;
	ccGenericMesh* modelMesh = nullptr;
	if (model->isKindOf(CC_TYPES::MESH))
	{
		modelMesh = ccHObjectCaster::ToGenericMesh(model);
		modelCloud = modelMesh->getAssociatedCloud();
	}
	else
	{
		modelCloud = ccHObjectCaster::ToGenericPointCloud(model);
	}

	//if the 'data' entity is a mesh, we need to sample points on it
	CCLib::GenericIndexedCloudPersist* dataCloud = nullptr;
	if (data->isKindOf(CC_TYPES::MESH))
	{
		dataCloud = CCLib::MeshSamplingTools::samplePointsOnMesh(ccHObjectCaster::ToGenericMesh(data), s_defaultSampledPointsOnDataMesh, progressDlg.data());
		if (!dataCloud)
		{
			ccLog::Error("[ICP] Failed to sample points on 'data' mesh!");
			return false;
		}
		cloudGarbage.add(dataCloud);
	}
	else
	{
		dataCloud = ccHObjectCaster::ToGenericPointCloud(data);
	}

	//we activate a temporary scalar field for registration distances computation
	CCLib::ScalarField* dataDisplayedSF = nullptr;
	int oldDataSfIdx = -1, dataSfIdx = -1;

	//if the 'data' entity is a real ccPointCloud, we can even create a proper temporary SF for registration distances
	if (data->isA(CC_TYPES::POINT_CLOUD))
	{
		ccPointCloud* pc = static_cast<ccPointCloud*>(data);
		dataDisplayedSF = pc->getCurrentDisplayedScalarField();
		oldDataSfIdx = pc->getCurrentInScalarFieldIndex();
		dataSfIdx = pc->getScalarFieldIndexByName(REGISTRATION_DISTS_SF);
		if (dataSfIdx < 0)
			dataSfIdx = pc->addScalarField(REGISTRATION_DISTS_SF);
		if (dataSfIdx >= 0)
		{
			pc->setCurrentScalarField(dataSfIdx);
		}
		else
		{
			ccLog::Error("[ICP] Couldn't create temporary scalar field! Not enough memory?");
			return false;
		}
	}
	else
	{
		if (!dataCloud->enableScalarField())
		{
			ccLog::Error("[ICP] Couldn't create temporary scalar field! Not enough memory?");
			return false;
		}
	}
Not enough memory?"); return false; } } //add a 'safety' margin to input ratio static double s_overlapMarginRatio = 0.2; finalOverlapRatio = std::max(finalOverlapRatio, 0.01); //1% minimum //do we need to reduce the input point cloud (so as to be close //to the theoretical number of overlapping points - but not too //low so as we are not registered yet ;) if (finalOverlapRatio < 1.0 - s_overlapMarginRatio) { //DGM we can now use 'approximate' distances as SAITO algorithm is exact (but with a coarse resolution) //level = 7 if < 1.000.000 //level = 8 if < 10.000.000 //level = 9 if > 10.000.000 int gridLevel = static_cast<int>(floor(log10(static_cast<double>(std::max(dataCloud->size(), modelCloud->size()))))) + 2; gridLevel = std::min(std::max(gridLevel, 7), 9); int result = -1; if (modelMesh) { CCLib::DistanceComputationTools::Cloud2MeshDistanceComputationParams c2mParams; c2mParams.octreeLevel = gridLevel; c2mParams.maxSearchDist = 0; c2mParams.useDistanceMap = true; c2mParams.signedDistances = false; c2mParams.flipNormals = false; c2mParams.multiThread = false; result = CCLib::DistanceComputationTools::computeCloud2MeshDistance(dataCloud, modelMesh, c2mParams, progressDlg.data()); } else { result = CCLib::DistanceComputationTools::computeApproxCloud2CloudDistance( dataCloud, modelCloud, gridLevel, -1, progressDlg.data()); } if (result < 0) { ccLog::Error("Failed to determine the max (overlap) distance (not enough memory?)"); return false; } //determine the max distance that (roughly) corresponds to the input overlap ratio ScalarType maxSearchDist = 0; { unsigned count = dataCloud->size(); std::vector<ScalarType> distances; try { distances.resize(count); } catch (const std::bad_alloc&) { ccLog::Error("Not enough memory!"); return false; } for (unsigned i=0; i<count; ++i) { distances[i] = dataCloud->getPointScalarValue(i); } ParallelSort(distances.begin(), distances.end()); //now look for the max value at 'finalOverlapRatio+margin' percent maxSearchDist = distances[static_cast<unsigned>(std::max(1.0,count*(finalOverlapRatio+s_overlapMarginRatio)))-1]; } //evntually select the points with distance below 'maxSearchDist' //(should roughly correspond to 'finalOverlapRatio + margin' percent) { CCLib::ReferenceCloud* refCloud = new CCLib::ReferenceCloud(dataCloud); cloudGarbage.add(refCloud); unsigned countBefore = dataCloud->size(); unsigned baseIncrement = static_cast<unsigned>(std::max(100.0,countBefore*finalOverlapRatio*0.05)); for (unsigned i=0; i<countBefore; ++i) { if (dataCloud->getPointScalarValue(i) <= maxSearchDist) { if ( refCloud->size() == refCloud->capacity() && !refCloud->reserve(refCloud->size() + baseIncrement) ) { ccLog::Error("Not enough memory!"); return false; } refCloud->addPointIndex(i); } } refCloud->resize(refCloud->size()); dataCloud = refCloud; unsigned countAfter = dataCloud->size(); double keptRatio = static_cast<double>(countAfter)/countBefore; ccLog::Print(QString("[ICP][Partial overlap] Selecting %1 points out of %2 (%3%) for registration").arg(countAfter).arg(countBefore).arg(static_cast<int>(100*keptRatio))); //update the relative 'final overlap' ratio finalOverlapRatio /= keptRatio; } } //weights CCLib::ScalarField* modelWeights = nullptr; CCLib::ScalarField* dataWeights = nullptr; { if (!modelMesh && useModelSFAsWeights) { if (modelCloud == dynamic_cast<CCLib::GenericIndexedCloudPersist*>(model) && model->isA(CC_TYPES::POINT_CLOUD)) { ccPointCloud* pc = static_cast<ccPointCloud*>(model); modelWeights = pc->getCurrentDisplayedScalarField(); if (!modelWeights) 
ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but model has no displayed scalar field!"); } else { ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but only point cloud scalar fields can be used as weights!"); } } if (useDataSFAsWeights) { if (!dataDisplayedSF) { if (dataCloud == ccHObjectCaster::ToPointCloud(data)) ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but data has no displayed scalar field!"); else ccLog::Warning("[ICP] 'useDataSFAsWeights' is true but only point cloud scalar fields can be used as weights!"); } else { dataWeights = dataDisplayedSF; } } } CCLib::ICPRegistrationTools::RESULT_TYPE result; CCLib::PointProjectionTools::Transformation transform; CCLib::ICPRegistrationTools::Parameters params; { params.convType = method; params.minRMSDecrease = minRMSDecrease; params.nbMaxIterations = maxIterationCount; params.adjustScale = adjustScale; params.filterOutFarthestPoints = removeFarthestPoints; params.samplingLimit = randomSamplingLimit; params.finalOverlapRatio = finalOverlapRatio; params.modelWeights = modelWeights; params.dataWeights = dataWeights; params.transformationFilters = filters; params.maxThreadCount = maxThreadCount; } result = CCLib::ICPRegistrationTools::Register( modelCloud, modelMesh, dataCloud, params, transform, finalRMS, finalPointCount, static_cast<CCLib::GenericProgressCallback*>(progressDlg.data())); if (result >= CCLib::ICPRegistrationTools::ICP_ERROR) { ccLog::Error("Registration failed: an error occurred (code %i)",result); } else if (result == CCLib::ICPRegistrationTools::ICP_APPLY_TRANSFO) { transMat = FromCCLibMatrix<PointCoordinateType, float>(transform.R, transform.T, transform.s); finalScale = transform.s; } //remove temporary SF (if any) if (dataSfIdx >= 0) { assert(data->isA(CC_TYPES::POINT_CLOUD)); ccPointCloud* pc = static_cast<ccPointCloud*>(data); pc->setCurrentScalarField(oldDataSfIdx); pc->deleteScalarField(dataSfIdx); } return (result < CCLib::ICPRegistrationTools::ICP_ERROR); }

CC_FILE_ERROR SavePolyline(ccPolyline* poly, QFile& file, int32_t& bytesWritten, ESRI_SHAPE_TYPE outputShapeType)
{
	bytesWritten = 0;

	if (!poly)
	{
		assert(false);
		return CC_FERR_BAD_ENTITY_TYPE;
	}

	CCLib::GenericIndexedCloudPersist* vertices = poly->getAssociatedCloud();
	if (!vertices)
		return CC_FERR_BAD_ENTITY_TYPE;

	int32_t realNumPoints = poly->size();
	if (realNumPoints < 3)
		return CC_FERR_BAD_ENTITY_TYPE;

	bool is2D = poly->is2DMode();
	bool isClosed = poly->isClosed();

	ccBBox box = poly->getBB();
	assert(box.isValid());

	//Shape Type
	{
		//Byte 0: Shape Type
		int32_t shapeTypeInt = qToLittleEndian<int32_t>(outputShapeType);
		file.write((const char*)&shapeTypeInt, 4);
		bytesWritten += 4;
	}

	//Byte 4: Box
	{
		double xMin = qToLittleEndian<double>(box.minCorner().x);
		double xMax = qToLittleEndian<double>(box.maxCorner().x);
		double yMin = qToLittleEndian<double>(box.minCorner().y);
		double yMax = qToLittleEndian<double>(box.maxCorner().y);
		//The Bounding Box for the PolyLine stored in the order Xmin, Ymin, Xmax, Ymax
		/*Byte  4*/file.write((const char*)&xMin, 8);
		/*Byte 12*/file.write((const char*)&yMin, 8);
		/*Byte 20*/file.write((const char*)&xMax, 8);
		/*Byte 28*/file.write((const char*)&yMax, 8);
		bytesWritten += 32;
	}

	//Byte 36: NumParts (The number of parts in the PolyLine)
	{
		int32_t numParts = qToLittleEndian<int32_t>(1);
		file.write((const char*)&numParts, 4);
		bytesWritten += 4;
	}

	//Byte 40: NumPoints (The total number of points for all parts)
	int32_t numPoints = realNumPoints;
	if (isClosed)
		numPoints++;
	{
		int32_t numPointsLE = qToLittleEndian<int32_t>(numPoints);
		file.write((const char*)&numPointsLE, 4);
		bytesWritten += 4;
	}

	//Byte 44: Parts (An array of length NumParts)
	{
		//for each part, the index of its first point in the points array
		int32_t startIndex = qToLittleEndian<int32_t>(0);
		file.write((const char*)&startIndex, 4);
		bytesWritten += 4;
	}

	//for polygons we must list the vertices in the right order:
	//"The neighborhood to the right of an observer walking along
	//the ring in vertex order is the inside of the polygon"
	bool inverseOrder = false;
	if (outputShapeType == SHP_POLYGON || outputShapeType == SHP_POLYGON_Z)
	{
		assert(isClosed);
		assert(numPoints > 2);

		//get bounding box
		ccBBox box = poly->getBB();
		assert(box.isValid());

		//get the two largest (ordered) dimensions (dim1, dim2)
		CCVector3 diag = box.getDiagVec();
		unsigned char minDim = diag.y < diag.x ? 1 : 0;
		if (diag.z < diag.u[minDim])
			minDim = 2;
		unsigned char dim1 = ((minDim + 1) % 3);
		unsigned char dim2 = ((minDim + 2) % 3);

		if (diag.u[dim1] > 0) //if the polyline is flat, no need to bother ;)
		{
			//look for the top-left-most point in this 'plane'
			int32_t leftMostPointIndex = 0;
			{
				const CCVector3* leftMostPoint = vertices->getPoint(0);
				for (int32_t i = 1; i < realNumPoints; ++i)
				{
					const CCVector3* P = vertices->getPoint(i);
					if (P->u[dim1] < leftMostPoint->u[dim1] || (P->u[dim1] == leftMostPoint->u[dim1] && P->u[dim2] < leftMostPoint->u[dim2]))
					{
						leftMostPoint = P;
						leftMostPointIndex = i;
					}
				}
			}
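
			//this extreme vertex necessarily lies on the convex hull of the polygon, so the
			//turn direction between its two incident edges gives the ring's overall winding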
			//we simply compare the angles between the two edges that have the top-left-most vertex in common
			{
				const CCVector3* B = vertices->getPoint(leftMostPointIndex > 0 ? leftMostPointIndex - 1 : realNumPoints - 1);
				const CCVector3* P = vertices->getPoint(leftMostPointIndex);
				const CCVector3* A = vertices->getPoint(leftMostPointIndex + 1 < realNumPoints ? leftMostPointIndex + 1 : 0);

				CCVector3 PA = *A - *P;
				CCVector3 PB = *B - *P;
				PointCoordinateType anglePA = atan2(PA.u[dim2], PA.u[dim1]); //forward
				PointCoordinateType anglePB = atan2(PB.u[dim2], PB.u[dim1]); //backward

				//angles should all be in [-PI/2;0]
				if (anglePA < anglePB)
					inverseOrder = true;
			}
		}
	}

	//Points (An array of length NumPoints)
	{
		for (int32_t i = 0; i < numPoints; ++i)
		{
			int32_t ii = (inverseOrder ? numPoints - 1 - i : i);
			const CCVector3* P = vertices->getPoint(ii % realNumPoints); //warning: handle loop if polyline is closed

			double x = qToLittleEndian<double>(P->x);
			double y = qToLittleEndian<double>(P->y);
			/*Byte 0*/file.write((const char*)&x, 8);
			/*Byte 8*/file.write((const char*)&y, 8);
			bytesWritten += 16;
		}
	}

	//3D polylines
	if (!is2D)
	{
		//Z boundaries
		{
			double zMin = qToLittleEndian<double>(box.minCorner().z);
			double zMax = qToLittleEndian<double>(box.maxCorner().z);
			file.write((const char*)&zMin, 8);
			file.write((const char*)&zMax, 8);
			bytesWritten += 16;
		}

		//Z coordinates (for each part - just one here)
		{
			for (int32_t i = 0; i < numPoints; ++i)
			{
				int32_t ii = (inverseOrder ? numPoints - 1 - i : i);
				const CCVector3* P = vertices->getPoint(ii % realNumPoints); //warning: handle loop if polyline is closed
				double z = qToLittleEndian<double>(P->z);
				file.write((const char*)&z, 8);
				bytesWritten += 8;
			}
		}

		//M boundaries
		bool hasSF = vertices->isScalarFieldEnabled();
		{
			double mMin = ESRI_NO_DATA;
			double mMax = ESRI_NO_DATA;
			if (hasSF)
			{
				for (int32_t i = 0; i < realNumPoints; ++i)
				{
					ScalarType scalar = vertices->getPointScalarValue(i);
					if (i != 0)
					{
						if (mMin > scalar)
							mMin = static_cast<double>(scalar);
						else if (mMax < scalar)
							mMax = static_cast<double>(scalar);
					}
					else
					{
						mMin = mMax = static_cast<double>(scalar);
					}
				}
			}
			mMin = qToLittleEndian<double>(mMin);
			mMax = qToLittleEndian<double>(mMax);
			file.write((const char*)&mMin, 8);
			file.write((const char*)&mMax, 8);
			bytesWritten += 16;
		}

		//M values (for each part - just one here)
		{
			double scalar = qToLittleEndian<double>(ESRI_NO_DATA);
			for (int32_t i = 0; i < numPoints; ++i)
			{
				if (hasSF)
				{
					int32_t ii = (inverseOrder ? numPoints - 1 - i : i);
					scalar = static_cast<double>(vertices->getPointScalarValue(ii % realNumPoints)); //warning: handle loop if polyline is closed
					scalar = qToLittleEndian<double>(scalar);
				}
				file.write((const char*)&scalar, 8);
				bytesWritten += 8;
			}
		}
	}

	return CC_FERR_NO_ERROR;
}
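
//For reference, a summary of the record contents written by SavePolyline above, as derived
//from the code (single-part PolyLine / PolyLineZ, N = NumPoints output vertices, sizes in bytes):
//
//	Shape Type           4
//	Bounding box        32	(Xmin, Ymin, Xmax, Ymax)
//	NumParts             4	(always 1 here)
//	NumPoints            4
//	Parts array          4	(single start index = 0)
//	XY coordinates  16 * N
//	--- 3D polylines only ---
//	Z range             16
//	Z coordinates    8 * N
//	M range             16
//	M values         8 * N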