/**
 * @brief Fill Pose (row-major 4x4) with a random rigid transform: a random
 *        rotation and a translation with components uniform in [0, 1].
 *
 * Fix: the original reseeded the C RNG with srand(time(0)) on EVERY call,
 * so calls within the same wall-clock second produced identical poses.
 * The generator is now seeded exactly once per process.
 */
void getRandomPose(double Pose[16])
{
  double R[9], t[3];

  // Seed only once; time(0) has one-second resolution, so per-call
  // reseeding made back-to-back calls deterministic duplicates.
  static bool seeded = false;
  if (!seeded)
  {
    srand((unsigned int)time(0));
    seeded = true;
  }

  getRandomRotation(R);

  // Translation components uniform in [0, 1]; compute the ratio in double
  // instead of float to avoid an unnecessary precision loss.
  t[0] = (double)rand() / (double)RAND_MAX;
  t[1] = (double)rand() / (double)RAND_MAX;
  t[2] = (double)rand() / (double)RAND_MAX;

  rtToPose(R, t, Pose);
}
// Source point clouds are assumed to contain their normals.
//
// Coarse-to-fine point-to-plane ICP: both clouds are mean-centered and scale-
// normalized, then a pyramid of increasingly dense subsamples is registered.
// At each level, FLANN nearest-neighbour correspondences are (optionally)
// robust-rejected by a distance threshold, de-duplicated "Picky ICP"-style
// (one model point keeps only its closest scene match), and the point-to-plane
// metric is minimized to update the pose. Outputs the accumulated pose
// (de-normalized back to the input frame) and the last level's best residual.
// Returns 0.
int ICP::registerModelToScene(const Mat& srcPC, const Mat& dstPC, double& residual, Matx44d& pose)
{
  int n = srcPC.rows;

  // Robust correspondence rejection is enabled by a positive rejection scale.
  const bool useRobustReject = m_rejectionScale>0;

  // Normalize both clouds: subtract the common mean and rescale so that the
  // average distance-to-origin is n (improves numerical conditioning).
  // The inverse of this normalization is applied to the final pose below.
  Mat srcTemp = srcPC.clone();
  Mat dstTemp = dstPC.clone();
  Vec3d meanSrc, meanDst;
  computeMeanCols(srcTemp, meanSrc);
  computeMeanCols(dstTemp, meanDst);
  Vec3d meanAvg = 0.5 * (meanSrc + meanDst);
  subtractColumns(srcTemp, meanAvg);
  subtractColumns(dstTemp, meanAvg);
  double distSrc = computeDistToOrigin(srcTemp);
  double distDst = computeDistToOrigin(dstTemp);
  double scale = (double)n / ((distSrc + distDst)*0.5);
  // Only the xyz columns (0..2) are scaled; normal columns are left alone.
  srcTemp(cv::Range(0, srcTemp.rows), cv::Range(0,3)) *= scale;
  dstTemp(cv::Range(0, dstTemp.rows), cv::Range(0,3)) *= scale;
  Mat srcPC0 = srcTemp;
  Mat dstPC0 = dstTemp;

  // initialize pose
  pose = Matx44d::eye();

  // NOTE(review): M appears to be unused below — candidate for removal.
  Mat M = Mat::eye(4,4,CV_64F);

  double tempResidual = 0;

  // Walk the pyramid from the coarsest level (fewest samples, loosest
  // tolerance, fewest iterations) down to level 0 (full sampling density).
  for (int level = m_numLevels-1; level >=0; level--)
  {
    // Each level halves the subsampling divisor: level L keeps ~n/2^L points.
    const double impact = 2;
    double div = pow((double)impact, (double)level);
    //double div2 = div*div;
    const int numSamples = cvRound((double)(n/(div)));
    // Coarser levels tolerate larger relative error and run fewer iterations.
    const double TolP = m_tolerance*(double)(level+1)*(level+1);
    const int MaxIterationsPyr = cvRound((double)m_maxIterations/(level+1));

    // Obtain the sampled point clouds for this level: Also rotates the normals
    Mat srcPCT = transformPCPose(srcPC0, pose);

    const int sampleStep = cvRound((double)n/(double)numSamples);

    srcPCT = samplePCUniform(srcPCT, sampleStep);
    /* Tolga Birdal thinks that downsampling the scene points might decrease the accuracy. Hamdi Sahloul, however, noticed that accuracy increased (pose residual decreased slightly).
    */
    Mat dstPCS = samplePCUniform(dstPC0, sampleStep);
    // Build a FLANN index over the (subsampled) scene for NN queries.
    void* flann = indexPCFlann(dstPCS);

    double fval_old=9999999999;
    double fval_perc=0;
    double fval_min=9999999999;
    Mat Src_Moved = srcPCT.clone();

    int i=0;

    size_t numElSrc = (size_t)Src_Moved.rows;
    int sizesResult[2] = {(int)numElSrc, 1};
    // Raw scratch buffers for NN results; wrapped below by non-owning Mats.
    float* distances = new float[numElSrc];
    int* indices = new int[numElSrc];

    Mat Indices(2, sizesResult, CV_32S, indices, 0);
    Mat Distances(2, sizesResult, CV_32F, distances, 0);

    // use robust weighting for outlier treatment
    int* indicesModel = new int[numElSrc];
    int* indicesScene = new int[numElSrc];

    // newI/newJ hold the current (model index, scene index) correspondences.
    int* newI = new int[numElSrc];
    int* newJ = new int[numElSrc];

    // Incremental pose for this pyramid level only.
    Matx44d PoseX = Matx44d::eye();

    // Iterate until the relative error change is within TolP of 1, or the
    // per-level iteration budget runs out.
    while ( (!(fval_perc<(1+TolP) && fval_perc>(1-TolP))) && i<MaxIterationsPyr)
    {
      uint di=0, selInd = 0;

      // Step 1: nearest-neighbour correspondences for the moved model points.
      queryPCFlann(flann, Src_Moved, Indices, Distances);

      for (di=0; di<numElSrc; di++)
      {
        newI[di] = di;
        newJ[di] = indices[di];
      }

      if (useRobustReject)
      {
        // Keep only pairs whose NN distance is under a robust threshold
        // derived from the distance distribution (rejection scale * spread).
        int numInliers = 0;
        float threshold = getRejectionThreshold(distances, Distances.rows, m_rejectionScale);
        Mat acceptInd = Distances<threshold;
        uchar *accPtr = (uchar*)acceptInd.data;
        for (int l=0; l<acceptInd.rows; l++)
        {
          if (accPtr[l])
          {
            newI[numInliers] = l;
            newJ[numInliers] = indices[l];
            numInliers++;
          }
        }
        // NOTE(review): numElSrc is overwritten and never reset to the full
        // row count, so the correspondence set can only shrink across
        // iterations of this while-loop — verify this is intentional.
        numElSrc=numInliers;
      }

      // Step 2: Picky ICP
      // Among the resulting corresponding pairs, if more than one scene point p_i
      // is assigned to the same model point m_j, then select p_i that corresponds
      // to the minimum distance
      hashtable_int* duplicateTable = getHashtable(newJ, numElSrc, dstPCS.rows);

      for (di=0; di<duplicateTable->size; di++)
      {
        hashnode_i *node = duplicateTable->nodes[di];

        if (node)
        {
          // select the first node
          // Hash entries store index+1 (0 is the "empty" sentinel), hence -1.
          size_t idx = reinterpret_cast<size_t>(node->data)-1, dn=0;
          int dup = (int)node->key-1;
          size_t minIdxD = idx;
          float minDist = distances[idx];

          // Walk the duplicate chain and keep the closest correspondence.
          while ( node )
          {
            idx = reinterpret_cast<size_t>(node->data)-1;

            if (distances[idx] < minDist)
            {
              minDist = distances[idx];
              minIdxD = idx;
            }

            node = node->next;
            dn++;
          }

          indicesModel[ selInd ] = newI[ minIdxD ];
          indicesScene[ selInd ] = dup ;
          selInd++;
        }
      }

      hashtableDestroy(duplicateTable);

      // A rigid transform has 6 DOF; need at least 6 correspondences.
      if (selInd >= 6)
      {
        // Gather the matched pairs into double-precision matrices for the
        // point-to-plane solver (source data is float).
        Mat Src_Match = Mat(selInd, srcPCT.cols, CV_64F);
        Mat Dst_Match = Mat(selInd, srcPCT.cols, CV_64F);

        for (di=0; di<selInd; di++)
        {
          const int indModel = indicesModel[di];
          const int indScene = indicesScene[di];
          const float *srcPt = srcPCT.ptr<float>(indModel);
          const float *dstPt = dstPCS.ptr<float>(indScene);
          double *srcMatchPt = Src_Match.ptr<double>(di);
          double *dstMatchPt = Dst_Match.ptr<double>(di);
          int ci=0;

          for (ci=0; ci<srcPCT.cols; ci++)
          {
            srcMatchPt[ci] = (double)srcPt[ci];
            dstMatchPt[ci] = (double)dstPt[ci];
          }
        }

        // Solve for the incremental rotation (Euler) and translation; bail
        // out of this level if the solver diverged to NaN.
        Vec3d rpy, t;
        minimizePointToPlaneMetric(Src_Match, Dst_Match, rpy, t);
        if (cvIsNaN(cv::trace(rpy)) || cvIsNaN(cv::norm(t)))
          break;
        getTransformMat(rpy, t, PoseX);
        Src_Moved = transformPCPose(srcPCT, PoseX);

        double fval = cv::norm(Src_Match, Dst_Match)/(double)(Src_Moved.rows);

        // Calculate change in error between iterations
        fval_perc=fval/fval_old;

        // Store error value
        fval_old=fval;

        if (fval < fval_min)
          fval_min = fval;
      }
      else
        break;

      i++;
    }

    // Fold this level's incremental pose into the accumulated pose.
    pose = PoseX * pose;
    // NOTE(review): this assignment is overwritten by the identical one after
    // the loop; it only matters if a future early return were added.
    residual = tempResidual;

    delete[] newI;
    delete[] newJ;
    delete[] indicesModel;
    delete[] indicesScene;
    delete[] distances;
    delete[] indices;

    // Carry this level's best error forward as the reported residual.
    tempResidual = fval_min;
    destroyFlann(flann);
  }

  // Undo the initial normalization: the estimated translation lives in the
  // scaled, mean-centered frame, so map it back to the original frame.
  Matx33d Rpose;
  Vec3d Cpose;
  poseToRT(pose, Rpose, Cpose);
  Cpose = Cpose / scale + meanAvg - Rpose * meanAvg;
  rtToPose(Rpose, Cpose, pose);

  residual = tempResidual;

  return 0;
}
// Compose a 4x4 rigid-body transform from Euler angles and a translation:
// the angles are converted to a direction-cosine (rotation) matrix, which is
// then packed together with t into Pose.
static void getTransformMat(Vec3d& euler, Vec3d& t, Matx44d& Pose)
{
  Matx33d rotation;
  eulerToDCM(euler, rotation);
  rtToPose(rotation, t, Pose);
}