Example #1
CV_EXPORTS int computeNormalsPC3d(const Mat& PC, Mat& PCNormals, const int NumNeighbors, const bool FlipViewpoint, const Vec3d& viewpoint)
{
  int i;

  if (PC.cols!=3 && PC.cols!=6) // 3d data is expected
  {
    //return -1;
    CV_Error(cv::Error::BadImageSize, "PC should have 3 or 6 elements in its columns");
  }

  // Flat buffers that back the Mat headers below; the KD-tree query fills
  // 'indices'/'distances' with the NumNeighbors nearest neighbors of each point.
  int sizes[2] = {PC.rows, 3};
  int sizesResult[2] = {PC.rows, NumNeighbors};
  float* dataset = new float[PC.rows*3];
  float* distances = new float[PC.rows*NumNeighbors];
  int* indices = new int[PC.rows*NumNeighbors];

  for (i=0; i<PC.rows; i++)
  {
    const float* src = PC.ptr<float>(i);
    float* dst = (float*)(&dataset[i*3]);

    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
  }

  Mat PCInput(2, sizes, CV_32F, dataset, 0);

  void* flannIndex = indexPCFlann(PCInput);

  Mat Indices(2, sizesResult, CV_32S, indices, 0);
  Mat Distances(2, sizesResult, CV_32F, distances, 0);

  queryPCFlann(flannIndex, PCInput, Indices, Distances, NumNeighbors);
  destroyFlann(flannIndex);
  flannIndex = 0;

  // Each output row is [x y z nx ny nz]
  PCNormals = Mat(PC.rows, 6, CV_32F);

  for (i=0; i<PC.rows; i++)
  {
    double C[3][3], mu[4];
    const float* pci = &dataset[i*3];
    float* pcr = PCNormals.ptr<float>(i);
    double nr[3];

    int* indLocal = &indices[i*NumNeighbors];

    // compute covariance matrix
    meanCovLocalPCInd(dataset, indLocal, 3, NumNeighbors, C, mu);

    // eigenvectors of covariance matrix
    Mat cov(3, 3, CV_64F), eigVect, eigVal;
    double* covData = (double*)cov.data;
    covData[0] = C[0][0];
    covData[1] = C[0][1];
    covData[2] = C[0][2];
    covData[3] = C[1][0];
    covData[4] = C[1][1];
    covData[5] = C[1][2];
    covData[6] = C[2][0];
    covData[7] = C[2][1];
    covData[8] = C[2][2];
    eigen(cov, eigVal, eigVect);
    Mat lowestEigVec;
    //the eigenvector for the lowest eigenvalue is in the last row
    eigVect.row(eigVect.rows - 1).copyTo(lowestEigVec);
    double* eigData = (double*)lowestEigVec.data;
    nr[0] = eigData[0];
    nr[1] = eigData[1];
    nr[2] = eigData[2];

    pcr[0] = pci[0];
    pcr[1] = pci[1];
    pcr[2] = pci[2];

    if (FlipViewpoint)
    {
      flipNormalViewpoint(pci, viewpoint[0], viewpoint[1], viewpoint[2], &nr[0], &nr[1], &nr[2]);
    }

    pcr[3] = (float)nr[0];
    pcr[4] = (float)nr[1];
    pcr[5] = (float)nr[2];
  }

  delete[] indices;
  delete[] distances;
  delete[] dataset;

  return 1;
}
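
A minimal calling sketch for the function above, assuming it is exposed through OpenCV's ppf_match_3d (surface_matching) module and that loadPLYSimple from the same module is available; "model.ply" is a placeholder path, and the Vec3d viewpoint matches the signature shown here (newer releases may take Vec3f instead):

#include <opencv2/surface_matching/ppf_helpers.hpp>
#include <iostream>

int main()
{
  using namespace cv;

  // Load an Nx3 (or Nx6) float point cloud; "model.ply" is a placeholder.
  Mat pc = ppf_match_3d::loadPLYSimple("model.ply", 0 /* without normals */);

  // Estimate a normal per point from its 6 nearest neighbors and flip each
  // normal so it points towards the viewpoint at the origin.
  Mat pcWithNormals;
  ppf_match_3d::computeNormalsPC3d(pc, pcWithNormals, 6, true, Vec3d(0, 0, 0));

  std::cout << pcWithNormals.rows << " points, each row [x y z nx ny nz]" << std::endl;
  return 0;
}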
Example #2
// source point clouds are assumed to contain their normals
int ICP::registerModelToScene(const Mat& srcPC, const Mat& dstPC, double& residual, double pose[16])
{
  int n = srcPC.rows;

  const bool useRobustReject = m_rejectionScale>0;

  Mat srcTemp = srcPC.clone();
  Mat dstTemp = dstPC.clone();
  double meanSrc[3], meanDst[3];
  computeMeanCols(srcTemp, meanSrc);
  computeMeanCols(dstTemp, meanDst);
  double meanAvg[3]={0.5*(meanSrc[0]+meanDst[0]), 0.5*(meanSrc[1]+meanDst[1]), 0.5*(meanSrc[2]+meanDst[2])};
  subtractColumns(srcTemp, meanAvg);
  subtractColumns(dstTemp, meanAvg);

  double distSrc = computeDistToOrigin(srcTemp);
  double distDst = computeDistToOrigin(dstTemp);

  double scale = (double)n / ((distSrc + distDst)*0.5);

  srcTemp(cv::Range(0, srcTemp.rows), cv::Range(0,3)) *= scale;
  dstTemp(cv::Range(0, dstTemp.rows), cv::Range(0,3)) *= scale;

  Mat srcPC0 = srcTemp;
  Mat dstPC0 = dstTemp;

  // initialize pose
  matrixIdentity(4, pose);

  void* flann = indexPCFlann(dstPC0);
  Mat M = Mat::eye(4,4,CV_64F);

  double tempResidual = 0;


  // walk the pyramid
  for (int level = m_numLevels-1; level >=0; level--)
  {
    const double impact = 2;
    double div = pow((double)impact, (double)level);
    //double div2 = div*div;
    const int numSamples = cvRound((double)(n/(div)));
    const double TolP = m_tolerance*(double)(level+1)*(level+1);
    const int MaxIterationsPyr = cvRound((double)m_maxIterations/(level+1));

    // Obtain the sampled point clouds for this level: Also rotates the normals
    Mat srcPCT = transformPCPose(srcPC0, pose);

    const int sampleStep = cvRound((double)n/(double)numSamples);
    std::vector<int> srcSampleInd;

    /*
    Note by Tolga Birdal
    Downsample the model point clouds. If more optimization is required,
    one could also downsample the scene points, but I think this might
    decrease the accuracy. That's why I won't be implementing it at this
    moment.

    Also note that you have to compute a KD-tree for each level.
    */
    srcPCT = samplePCUniformInd(srcPCT, sampleStep, srcSampleInd);

    double fval_old=9999999999;
    double fval_perc=0;
    double fval_min=9999999999;
    Mat Src_Moved = srcPCT.clone();

    int i=0;

    size_t numElSrc = (size_t)Src_Moved.rows;
    int sizesResult[2] = {(int)numElSrc, 1};
    float* distances = new float[numElSrc];
    int* indices = new int[numElSrc];

    Mat Indices(2, sizesResult, CV_32S, indices, 0);
    Mat Distances(2, sizesResult, CV_32F, distances, 0);

    // use robust weighting for outlier treatment
    int* indicesModel = new int[numElSrc];
    int* indicesScene = new int[numElSrc];

    int* newI = new int[numElSrc];
    int* newJ = new int[numElSrc];

    double PoseX[16]={0};
    matrixIdentity(4, PoseX);

    // Iterate until the relative change in error (fval/fval_old) falls within
    // 1 +/- TolP or the per-level iteration budget is exhausted
    while ( (!(fval_perc<(1+TolP) && fval_perc>(1-TolP))) && i<MaxIterationsPyr)
    {
      size_t di=0, selInd = 0;

      queryPCFlann(flann, Src_Moved, Indices, Distances);

      for (di=0; di<numElSrc; di++)
      {
        newI[di] = (int)di;
        newJ[di] = indices[di];
      }

      if (useRobustReject)
      {
        int numInliers = 0;
        float threshold = getRejectionThreshold(distances, Distances.rows, m_rejectionScale);
        Mat acceptInd = Distances<threshold;

        uchar *accPtr = (uchar*)acceptInd.data;
        for (int l=0; l<acceptInd.rows; l++)
        {
          if (accPtr[l])
          {
            newI[numInliers] = l;
            newJ[numInliers] = indices[l];
            numInliers++;
          }
        }
        numElSrc=numInliers;
      }

      // Step 2: Picky ICP
      // Among the resulting corresponding pairs, if more than one scene point p_i
      // is assigned to the same model point m_j, then select p_i that corresponds
      // to the minimum distance

      hashtable_int* duplicateTable = getHashtable(newJ, numElSrc, dstPC0.rows);

      for (di=0; di<duplicateTable->size; di++)
      {
        hashnode_i *node = duplicateTable->nodes[di];

        if (node)
        {
          // select the first node
          int idx = reinterpret_cast<long>(node->data)-1, dn=0;
          int dup = (int)node->key-1;
          int minIdxD = idx;
          float minDist = distances[idx];

          while ( node )
          {
            idx = reinterpret_cast<long>(node->data)-1;

            if (distances[idx] < minDist)
            {
              minDist = distances[idx];
              minIdxD = idx;
            }

            node = node->next;
            dn++;
          }

          indicesModel[ selInd ] = newI[ minIdxD ];
          indicesScene[ selInd ] = dup ;
          selInd++;
        }
      }

      hashtableDestroy(duplicateTable);

      if (selInd)
      {

        Mat Src_Match = Mat((int)selInd, srcPCT.cols, CV_64F);
        Mat Dst_Match = Mat((int)selInd, srcPCT.cols, CV_64F);

        for (di=0; di<selInd; di++)
        {
          const int indModel = indicesModel[di];
          const int indScene = indicesScene[di];
          const float *srcPt = (float*)&srcPCT.data[indModel*srcPCT.step];
          const float *dstPt = (float*)&dstPC0.data[indScene*dstPC0.step];
          double *srcMatchPt = (double*)&Src_Match.data[di*Src_Match.step];
          double *dstMatchPt = (double*)&Dst_Match.data[di*Dst_Match.step];
          int ci=0;

          for (ci=0; ci<srcPCT.cols; ci++)
          {
            srcMatchPt[ci] = (double)srcPt[ci];
            dstMatchPt[ci] = (double)dstPt[ci];
          }
        }

        Mat X;
        minimizePointToPlaneMetric(Src_Match, Dst_Match, X);

        getTransformMat(X, PoseX);
        Src_Moved = transformPCPose(srcPCT, PoseX);

        double fval = cv::norm(Src_Match, Dst_Match)/(double)(Src_Moved.rows);

        // Calculate change in error between iterations
        fval_perc=fval/fval_old;

        // Store error value
        fval_old=fval;

        if (fval < fval_min)
          fval_min = fval;
      }
      else
        break;

      i++;

    }

    double TempPose[16];
    matrixProduct44(PoseX, pose, TempPose);

    // no need to copy the last row: it is always [0 0 0 1]
    for (int c=0; c<12; c++)
      pose[c] = TempPose[c];

    residual = tempResidual;

    delete[] newI;
    delete[] newJ;
    delete[] indicesModel;
    delete[] indicesScene;
    delete[] distances;
    delete[] indices;

    tempResidual = fval_min;
  }

  // Pose(1:3, 4) = Pose(1:3, 4)./scale;
  pose[3] = pose[3]/scale + meanAvg[0];
  pose[7] = pose[7]/scale + meanAvg[1];
  pose[11] = pose[11]/scale + meanAvg[2];

  // In MATLAB this would be : Pose(1:3, 4) = Pose(1:3, 4)./scale + meanAvg' - Pose(1:3, 1:3)*meanAvg';
  double Rpose[9], Cpose[3];
  poseToR(pose, Rpose);
  matrixProduct331(Rpose, meanAvg, Cpose);
  pose[3] -= Cpose[0];
  pose[7] -= Cpose[1];
  pose[11] -= Cpose[2];

  residual = tempResidual;

  destroyFlann(flann);
  return 0;
}
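
To make the de-normalization at the end of this function explicit: the clouds were shifted by -meanAvg and scaled by scale before registration, so the recovered translation is mapped back with t = t/scale + meanAvg - R*meanAvg (the MATLAB comment above). Below is a standalone sketch of that correction for a row-major 4x4 pose; denormalizePose is a hypothetical helper name, not part of the module:

#include <opencv2/core.hpp>

// Undo the normalization applied before ICP. 'pose' is a 4x4 row-major
// matrix as in registerModelToScene above; 'scale' and 'meanAvg' are the
// normalization parameters computed from the input clouds.
static void denormalizePose(double pose[16], double scale, const cv::Vec3d& meanAvg)
{
  cv::Matx33d R(pose[0], pose[1], pose[2],
                pose[4], pose[5], pose[6],
                pose[8], pose[9], pose[10]);
  cv::Vec3d t(pose[3], pose[7], pose[11]);

  // t = t/scale + meanAvg - R*meanAvg
  t = t * (1.0 / scale) + meanAvg - R * meanAvg;

  pose[3]  = t[0];
  pose[7]  = t[1];
  pose[11] = t[2];
}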
Example #3
// source point clouds are assumed to contain their normals
int ICP::registerModelToScene(const Mat& srcPC, const Mat& dstPC, double& residual, Matx44d& pose)
{
  int n = srcPC.rows;

  const bool useRobustReject = m_rejectionScale>0;

  Mat srcTemp = srcPC.clone();
  Mat dstTemp = dstPC.clone();
  Vec3d meanSrc, meanDst;
  computeMeanCols(srcTemp, meanSrc);
  computeMeanCols(dstTemp, meanDst);
  Vec3d meanAvg = 0.5 * (meanSrc + meanDst);
  subtractColumns(srcTemp, meanAvg);
  subtractColumns(dstTemp, meanAvg);

  double distSrc = computeDistToOrigin(srcTemp);
  double distDst = computeDistToOrigin(dstTemp);

  double scale = (double)n / ((distSrc + distDst)*0.5);

  srcTemp(cv::Range(0, srcTemp.rows), cv::Range(0,3)) *= scale;
  dstTemp(cv::Range(0, dstTemp.rows), cv::Range(0,3)) *= scale;

  Mat srcPC0 = srcTemp;
  Mat dstPC0 = dstTemp;

  // initialize pose
  pose = Matx44d::eye();

  Mat M = Mat::eye(4,4,CV_64F);

  double tempResidual = 0;


  // walk the pyramid
  for (int level = m_numLevels-1; level >=0; level--)
  {
    const double impact = 2;
    double div = pow((double)impact, (double)level);
    //double div2 = div*div;
    const int numSamples = cvRound((double)(n/(div)));
    const double TolP = m_tolerance*(double)(level+1)*(level+1);
    const int MaxIterationsPyr = cvRound((double)m_maxIterations/(level+1));

    // Obtain the sampled point clouds for this level: Also rotates the normals
    Mat srcPCT = transformPCPose(srcPC0, pose);

    const int sampleStep = cvRound((double)n/(double)numSamples);

    srcPCT = samplePCUniform(srcPCT, sampleStep);
    /*
    Tolga Birdal thinks that downsampling the scene points might decrease the accuracy.
    Hamdi Sahloul, however, noticed that accuracy increased (pose residual decreased slightly).
    */
    Mat dstPCS = samplePCUniform(dstPC0, sampleStep);
    void* flann = indexPCFlann(dstPCS);

    double fval_old=9999999999;
    double fval_perc=0;
    double fval_min=9999999999;
    Mat Src_Moved = srcPCT.clone();

    int i=0;

    size_t numElSrc = (size_t)Src_Moved.rows;
    int sizesResult[2] = {(int)numElSrc, 1};
    float* distances = new float[numElSrc];
    int* indices = new int[numElSrc];

    Mat Indices(2, sizesResult, CV_32S, indices, 0);
    Mat Distances(2, sizesResult, CV_32F, distances, 0);

    // use robust weighting for outlier treatment
    int* indicesModel = new int[numElSrc];
    int* indicesScene = new int[numElSrc];

    int* newI = new int[numElSrc];
    int* newJ = new int[numElSrc];

    Matx44d PoseX = Matx44d::eye();

    // Iterate until the relative change in error (fval/fval_old) falls within
    // 1 +/- TolP or the per-level iteration budget is exhausted
    while ( (!(fval_perc<(1+TolP) && fval_perc>(1-TolP))) && i<MaxIterationsPyr)
    {
      uint di=0, selInd = 0;

      queryPCFlann(flann, Src_Moved, Indices, Distances);

      for (di=0; di<numElSrc; di++)
      {
        newI[di] = di;
        newJ[di] = indices[di];
      }

      if (useRobustReject)
      {
        int numInliers = 0;
        float threshold = getRejectionThreshold(distances, Distances.rows, m_rejectionScale);
        Mat acceptInd = Distances<threshold;

        uchar *accPtr = (uchar*)acceptInd.data;
        for (int l=0; l<acceptInd.rows; l++)
        {
          if (accPtr[l])
          {
            newI[numInliers] = l;
            newJ[numInliers] = indices[l];
            numInliers++;
          }
        }
        numElSrc=numInliers;
      }

      // Step 2: Picky ICP
      // Among the resulting corresponding pairs, if more than one scene point p_i
      // is assigned to the same model point m_j, then select p_i that corresponds
      // to the minimum distance

      hashtable_int* duplicateTable = getHashtable(newJ, numElSrc, dstPCS.rows);

      for (di=0; di<duplicateTable->size; di++)
      {
        hashnode_i *node = duplicateTable->nodes[di];

        if (node)
        {
          // select the first node
          size_t idx = reinterpret_cast<size_t>(node->data)-1, dn=0;
          int dup = (int)node->key-1;
          size_t minIdxD = idx;
          float minDist = distances[idx];

          while ( node )
          {
            idx = reinterpret_cast<size_t>(node->data)-1;

            if (distances[idx] < minDist)
            {
              minDist = distances[idx];
              minIdxD = idx;
            }

            node = node->next;
            dn++;
          }

          indicesModel[ selInd ] = newI[ minIdxD ];
          indicesScene[ selInd ] = dup ;
          selInd++;
        }
      }

      hashtableDestroy(duplicateTable);

      if (selInd >= 6)
      {

        Mat Src_Match = Mat(selInd, srcPCT.cols, CV_64F);
        Mat Dst_Match = Mat(selInd, srcPCT.cols, CV_64F);

        for (di=0; di<selInd; di++)
        {
          const int indModel = indicesModel[di];
          const int indScene = indicesScene[di];
          const float *srcPt = srcPCT.ptr<float>(indModel);
          const float *dstPt = dstPCS.ptr<float>(indScene);
          double *srcMatchPt = Src_Match.ptr<double>(di);
          double *dstMatchPt = Dst_Match.ptr<double>(di);
          int ci=0;

          for (ci=0; ci<srcPCT.cols; ci++)
          {
            srcMatchPt[ci] = (double)srcPt[ci];
            dstMatchPt[ci] = (double)dstPt[ci];
          }
        }

        Vec3d rpy, t;
        minimizePointToPlaneMetric(Src_Match, Dst_Match, rpy, t);
        if (cvIsNaN(cv::trace(rpy)) || cvIsNaN(cv::norm(t)))
          break;
        getTransformMat(rpy, t, PoseX);
        Src_Moved = transformPCPose(srcPCT, PoseX);

        double fval = cv::norm(Src_Match, Dst_Match)/(double)(Src_Moved.rows);

        // Calculate change in error between iterations
        fval_perc=fval/fval_old;

        // Store error value
        fval_old=fval;

        if (fval < fval_min)
          fval_min = fval;
      }
      else
        break;

      i++;

    }

    pose = PoseX * pose;
    residual = tempResidual;

    delete[] newI;
    delete[] newJ;
    delete[] indicesModel;
    delete[] indicesScene;
    delete[] distances;
    delete[] indices;

    tempResidual = fval_min;
    destroyFlann(flann);
  }

  Matx33d Rpose;
  Vec3d Cpose;
  poseToRT(pose, Rpose, Cpose);
  Cpose = Cpose / scale + meanAvg - Rpose * meanAvg;
  rtToPose(Rpose, Cpose, pose);

  residual = tempResidual;

  return 0;
}
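
A minimal usage sketch for this overload through the public OpenCV API, assuming the cv::ppf_match_3d::ICP class from the surface_matching module; the file names and constructor parameters are placeholders, and both clouds must already carry normals (Nx6 float):

#include <opencv2/surface_matching.hpp>
#include <opencv2/surface_matching/ppf_helpers.hpp>
#include <iostream>

int main()
{
  using namespace cv;

  // Load model and scene with normals (second argument = 1); placeholder paths.
  Mat model = ppf_match_3d::loadPLYSimple("model.ply", 1);
  Mat scene = ppf_match_3d::loadPLYSimple("scene.ply", 1);

  // 100 iterations, 0.005 tolerance, rejection scale 2.5, 8 pyramid levels.
  ppf_match_3d::ICP icp(100, 0.005f, 2.5f, 8);

  double residual = 0;
  Matx44d pose;
  icp.registerModelToScene(model, scene, residual, pose);

  std::cout << "residual: " << residual << "\npose:\n" << Mat(pose) << std::endl;
  return 0;
}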