template <typename PointInT> void
pcl::NormalEstimation<PointInT, Eigen::MatrixXf>::computeFeatureEigen (pcl::PointCloud<Eigen::MatrixXf> &output)
{
  // Resize the output dataset
  output.points.resize (indices_->size (), 4);

  // Allocate enough space to hold the results
  // \note This resize is irrelevant for a radiusSearch ().
  std::vector<int> nn_indices (k_);
  std::vector<float> nn_dists (k_);

  output.is_dense = true;
  // Save a few cycles by not checking every point for NaN/Inf values if the cloud is set to dense
  if (input_->is_dense)
  {
    // Iterating over the entire index vector
    for (size_t idx = 0; idx < indices_->size (); ++idx)
    {
      if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
      {
        output.points (idx, 0) = output.points (idx, 1) = output.points (idx, 2) = output.points (idx, 3) = std::numeric_limits<float>::quiet_NaN ();
        output.is_dense = false;
        continue;
      }

      computePointNormal (*surface_, nn_indices,
                          output.points (idx, 0), output.points (idx, 1), output.points (idx, 2), output.points (idx, 3));

      flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
                                  output.points (idx, 0), output.points (idx, 1), output.points (idx, 2));
    }
  }
  else
  {
    // Iterating over the entire index vector
    for (size_t idx = 0; idx < indices_->size (); ++idx)
    {
      if (!isFinite ((*input_)[(*indices_)[idx]]) ||
          this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
      {
        output.points (idx, 0) = output.points (idx, 1) = output.points (idx, 2) = output.points (idx, 3) = std::numeric_limits<float>::quiet_NaN ();
        output.is_dense = false;
        continue;
      }

      computePointNormal (*surface_, nn_indices,
                          output.points (idx, 0), output.points (idx, 1), output.points (idx, 2), output.points (idx, 3));

      flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
                                  output.points (idx, 0), output.points (idx, 1), output.points (idx, 2));
    }
  }
}
template <typename PointInT, typename PointOutT> void
pcl::NormalEstimation<PointInT, PointOutT>::computeFeature (PointCloudOut &output)
{
  // Allocate enough space to hold the results
  // \note This resize is irrelevant for a radiusSearch ().
  std::vector<int> nn_indices (k_);
  std::vector<float> nn_dists (k_);

  // Iterating over the entire index vector
  for (size_t idx = 0; idx < indices_->size (); ++idx)
  {
    if (!this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists))
    {
      output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
      continue;
    }

    computePointNormal (*surface_, nn_indices,
                        output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2], output.points[idx].curvature);

    flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
                                output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
  }
}
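// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of this implementation file): how the
// neighborhood-based estimator above is typically driven from user code. The
// class and method names are the standard public PCL API; the file name and
// the 3 cm search radius are placeholder values.
//
//   #include <pcl/point_types.h>
//   #include <pcl/io/pcd_io.h>
//   #include <pcl/search/kdtree.h>
//   #include <pcl/features/normal_3d.h>
//
//   pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
//   pcl::io::loadPCDFile ("input.pcd", *cloud);
//
//   pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
//   ne.setInputCloud (cloud);
//
//   // Neighbors are gathered through a search object; a kd-tree is the usual choice
//   pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
//   ne.setSearchMethod (tree);
//   ne.setRadiusSearch (0.03);   // use all neighbors within 3 cm
//
//   pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
//   ne.compute (*normals);       // ends up in computeFeature () above
// ---------------------------------------------------------------------------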
template <typename PointInT> void
pcl16::NormalEstimationOMP<PointInT, Eigen::MatrixXf>::computeFeatureEigen (pcl16::PointCloud<Eigen::MatrixXf> &output)
{
  float vpx, vpy, vpz;
  getViewPoint (vpx, vpy, vpz);
  output.is_dense = true;

  // Resize the output dataset
  output.points.resize (indices_->size (), 4);

  // GCC 4.2.x seems to segfault with "internal compiler error" on MacOS X here
#if defined(_WIN32) || ((__GNUC__ > 4) && (__GNUC_MINOR__ > 2))
#pragma omp parallel for schedule (dynamic, threads_)
#endif
  // Iterating over the entire index vector
  for (int idx = 0; idx < static_cast<int> (indices_->size ()); ++idx)
  {
    // Allocate enough space to hold the results
    // \note This resize is irrelevant for a radiusSearch ().
    std::vector<int> nn_indices (k_);
    std::vector<float> nn_dists (k_);

    if (!isFinite ((*input_)[(*indices_)[idx]]) ||
        this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
    {
      output.points (idx, 0) = output.points (idx, 1) = output.points (idx, 2) = output.points (idx, 3) = std::numeric_limits<float>::quiet_NaN ();
      output.is_dense = false;
      continue;
    }

    // 16-bytes aligned placeholder for the XYZ centroid of a surface patch
    Eigen::Vector4f xyz_centroid;
    // Estimate the XYZ centroid
    compute3DCentroid (*surface_, nn_indices, xyz_centroid);

    // Placeholder for the 3x3 covariance matrix at each surface patch
    EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
    // Compute the 3x3 covariance matrix
    computeCovarianceMatrix (*surface_, nn_indices, xyz_centroid, covariance_matrix);

    // Get the plane normal and surface curvature
    solvePlaneParameters (covariance_matrix,
                          output.points (idx, 0), output.points (idx, 1), output.points (idx, 2), output.points (idx, 3));

    flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx, vpy, vpz,
                                output.points (idx, 0), output.points (idx, 1), output.points (idx, 2));
  }
}
template <typename PointInT, typename PointOutT> void
pcl16::NormalEstimationOMP<PointInT, PointOutT>::computeFeature (PointCloudOut &output)
{
  float vpx, vpy, vpz;
  getViewPoint (vpx, vpy, vpz);
  output.is_dense = true;

  // Iterating over the entire index vector
#pragma omp parallel for schedule (dynamic, threads_)
  for (int idx = 0; idx < static_cast<int> (indices_->size ()); ++idx)
  {
    // Allocate enough space to hold the results
    // \note This resize is irrelevant for a radiusSearch ().
    std::vector<int> nn_indices (k_);
    std::vector<float> nn_dists (k_);

    if (!isFinite ((*input_)[(*indices_)[idx]]) ||
        this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0)
    {
      output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
      output.is_dense = false;
      continue;
    }

    // 16-bytes aligned placeholder for the XYZ centroid of a surface patch
    Eigen::Vector4f xyz_centroid;
    // Estimate the XYZ centroid
    compute3DCentroid (*surface_, nn_indices, xyz_centroid);

    // Placeholder for the 3x3 covariance matrix at each surface patch
    EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
    // Compute the 3x3 covariance matrix
    computeCovarianceMatrix (*surface_, nn_indices, xyz_centroid, covariance_matrix);

    // Get the plane normal and surface curvature
    solvePlaneParameters (covariance_matrix,
                          output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2], output.points[idx].curvature);

    flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx, vpy, vpz,
                                output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
  }
}
template <typename PointInT, typename PointOutT> void
pcl::NormalEstimationOMP<PointInT, PointOutT>::computeFeature (PointCloudOut &output)
{
  // Allocate enough space to hold the results
  // \note This resize is irrelevant for a radiusSearch ().
  std::vector<int> nn_indices (k_);
  std::vector<float> nn_dists (k_);

  output.is_dense = true;
  // Save a few cycles by not checking every point for NaN/Inf values if the cloud is set to dense
  if (input_->is_dense)
  {
#ifdef _OPENMP
#pragma omp parallel for shared (output) private (nn_indices, nn_dists) num_threads(threads_)
#endif
    // Iterating over the entire index vector
    for (int idx = 0; idx < static_cast<int> (indices_->size ()); ++idx)
    {
      Eigen::Vector4f n;
      if (this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0 ||
          !computePointNormal (*surface_, nn_indices, n, output.points[idx].curvature))
      {
        output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
        output.is_dense = false;
        continue;
      }

      output.points[idx].normal_x = n[0];
      output.points[idx].normal_y = n[1];
      output.points[idx].normal_z = n[2];

      flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
                                  output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
    }
  }
  else
  {
#ifdef _OPENMP
#pragma omp parallel for shared (output) private (nn_indices, nn_dists) num_threads(threads_)
#endif
    // Iterating over the entire index vector
    for (int idx = 0; idx < static_cast<int> (indices_->size ()); ++idx)
    {
      Eigen::Vector4f n;
      if (!isFinite ((*input_)[(*indices_)[idx]]) ||
          this->searchForNeighbors ((*indices_)[idx], search_parameter_, nn_indices, nn_dists) == 0 ||
          !computePointNormal (*surface_, nn_indices, n, output.points[idx].curvature))
      {
        output.points[idx].normal[0] = output.points[idx].normal[1] = output.points[idx].normal[2] = output.points[idx].curvature = std::numeric_limits<float>::quiet_NaN ();
        output.is_dense = false;
        continue;
      }

      output.points[idx].normal_x = n[0];
      output.points[idx].normal_y = n[1];
      output.points[idx].normal_z = n[2];

      flipNormalTowardsViewpoint (input_->points[(*indices_)[idx]], vpx_, vpy_, vpz_,
                                  output.points[idx].normal[0], output.points[idx].normal[1], output.points[idx].normal[2]);
    }
  }
}
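// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file): the OpenMP variant is
// used as a drop-in replacement for pcl::NormalEstimation, with the extra
// setNumberOfThreads () call. The names below are the standard public API; the
// thread count and search radius are placeholder values, and `cloud` is the
// input cloud from the previous sketch.
//
//   #include <pcl/features/normal_3d_omp.h>
//
//   pcl::NormalEstimationOMP<pcl::PointXYZ, pcl::Normal> ne;
//   ne.setNumberOfThreads (4);     // number of OpenMP threads to use
//   ne.setInputCloud (cloud);
//   ne.setRadiusSearch (0.03);
//
//   pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
//   ne.compute (*normals);         // runs the parallel loop in computeFeature () above
// ---------------------------------------------------------------------------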
template <typename PointInT, typename PointOutT> void
pcl::IntegralImageNormalEstimation<PointInT, PointOutT>::computePointNormalMirror (
    const int pos_x, const int pos_y, const unsigned point_index, PointOutT &normal)
{
  float bad_point = std::numeric_limits<float>::quiet_NaN ();

  const int width = input_->width;
  const int height = input_->height;

  // ==============================================================
  if (normal_estimation_method_ == COVARIANCE_MATRIX)
  {
    if (!init_covariance_matrix_)
      initCovarianceMatrixMethod ();

    const int start_x = pos_x - rect_width_2_;
    const int start_y = pos_y - rect_height_2_;
    const int end_x = start_x + rect_width_;
    const int end_y = start_y + rect_height_;

    unsigned count = 0;
    sumArea<unsigned>(start_x, start_y, end_x, end_y, width, height,
                      boost::bind(&IntegralImage2D<float, 3>::getFiniteElementsCountSE, &integral_image_XYZ_, _1, _2, _3, _4),
                      count);

    // no valid points within the rectangular region?
    if (count == 0)
    {
      normal.normal_x = normal.normal_y = normal.normal_z = normal.curvature = bad_point;
      return;
    }

    EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
    Eigen::Vector3f center;
    typename IntegralImage2D<float, 3>::SecondOrderType so_elements;
    typename IntegralImage2D<float, 3>::ElementType tmp_center;
    typename IntegralImage2D<float, 3>::SecondOrderType tmp_so_elements;

    center[0] = 0;
    center[1] = 0;
    center[2] = 0;
    tmp_center[0] = 0;
    tmp_center[1] = 0;
    tmp_center[2] = 0;
    so_elements[0] = 0;
    so_elements[1] = 0;
    so_elements[2] = 0;
    so_elements[3] = 0;
    so_elements[4] = 0;
    so_elements[5] = 0;

    sumArea<typename IntegralImage2D<float, 3>::ElementType>(start_x, start_y, end_x, end_y, width, height,
                                                             boost::bind(&IntegralImage2D<float, 3>::getFirstOrderSumSE, &integral_image_XYZ_, _1, _2, _3, _4),
                                                             tmp_center);
    sumArea<typename IntegralImage2D<float, 3>::SecondOrderType>(start_x, start_y, end_x, end_y, width, height,
                                                                 boost::bind(&IntegralImage2D<float, 3>::getSecondOrderSumSE, &integral_image_XYZ_, _1, _2, _3, _4),
                                                                 so_elements);

    center[0] = float (tmp_center[0]);
    center[1] = float (tmp_center[1]);
    center[2] = float (tmp_center[2]);

    covariance_matrix.coeffRef (0) = static_cast<float> (so_elements [0]);
    covariance_matrix.coeffRef (1) = covariance_matrix.coeffRef (3) = static_cast<float> (so_elements [1]);
    covariance_matrix.coeffRef (2) = covariance_matrix.coeffRef (6) = static_cast<float> (so_elements [2]);
    covariance_matrix.coeffRef (4) = static_cast<float> (so_elements [3]);
    covariance_matrix.coeffRef (5) = covariance_matrix.coeffRef (7) = static_cast<float> (so_elements [4]);
    covariance_matrix.coeffRef (8) = static_cast<float> (so_elements [5]);
    covariance_matrix -= (center * center.transpose ()) / static_cast<float> (count);

    float eigen_value;
    Eigen::Vector3f eigen_vector;
    pcl::eigen33 (covariance_matrix, eigen_value, eigen_vector);
    flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, eigen_vector[0], eigen_vector[1], eigen_vector[2]);
    normal.getNormalVector3fMap () = eigen_vector;

    // Compute the curvature surface change
    if (eigen_value > 0.0)
      normal.curvature = fabsf (eigen_value / (covariance_matrix.coeff (0) + covariance_matrix.coeff (4) + covariance_matrix.coeff (8)));
    else
      normal.curvature = 0;

    return;
  }
  // =======================================================
  else if (normal_estimation_method_ == AVERAGE_3D_GRADIENT)
  {
    if (!init_average_3d_gradient_)
      initAverage3DGradientMethod ();

    const int start_x = pos_x - rect_width_2_;
    const int start_y = pos_y - rect_height_2_;
    const int end_x = start_x + rect_width_;
    const int end_y = start_y + rect_height_;

    unsigned count_x = 0;
    unsigned count_y = 0;

    sumArea<unsigned>(start_x, start_y, end_x, end_y, width, height,
                      boost::bind(&IntegralImage2D<float, 3>::getFiniteElementsCountSE, &integral_image_DX_, _1, _2, _3, _4),
                      count_x);
    sumArea<unsigned>(start_x, start_y, end_x, end_y, width, height,
                      boost::bind(&IntegralImage2D<float, 3>::getFiniteElementsCountSE, &integral_image_DY_, _1, _2, _3, _4),
                      count_y);

    if (count_x == 0 || count_y == 0)
    {
      normal.normal_x = normal.normal_y = normal.normal_z = normal.curvature = bad_point;
      return;
    }

    Eigen::Vector3d gradient_x (0, 0, 0);
    Eigen::Vector3d gradient_y (0, 0, 0);

    sumArea<typename IntegralImage2D<float, 3>::ElementType>(start_x, start_y, end_x, end_y, width, height,
                                                             boost::bind(&IntegralImage2D<float, 3>::getFirstOrderSumSE, &integral_image_DX_, _1, _2, _3, _4),
                                                             gradient_x);
    sumArea<typename IntegralImage2D<float, 3>::ElementType>(start_x, start_y, end_x, end_y, width, height,
                                                             boost::bind(&IntegralImage2D<float, 3>::getFirstOrderSumSE, &integral_image_DY_, _1, _2, _3, _4),
                                                             gradient_y);

    Eigen::Vector3d normal_vector = gradient_y.cross (gradient_x);
    double normal_length = normal_vector.squaredNorm ();
    if (normal_length == 0.0f)
    {
      normal.getNormalVector3fMap ().setConstant (bad_point);
      normal.curvature = bad_point;
      return;
    }

    normal_vector /= sqrt (normal_length);

    float nx = static_cast<float> (normal_vector [0]);
    float ny = static_cast<float> (normal_vector [1]);
    float nz = static_cast<float> (normal_vector [2]);

    flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, nx, ny, nz);

    normal.normal_x = nx;
    normal.normal_y = ny;
    normal.normal_z = nz;
    normal.curvature = bad_point;
    return;
  }
  // ======================================================
  else if (normal_estimation_method_ == AVERAGE_DEPTH_CHANGE)
  {
    if (!init_depth_change_)
      initAverageDepthChangeMethod ();

    int point_index_L_x = pos_x - rect_width_4_ - 1;
    int point_index_L_y = pos_y;
    int point_index_R_x = pos_x + rect_width_4_ + 1;
    int point_index_R_y = pos_y;
    int point_index_U_x = pos_x - 1;
    int point_index_U_y = pos_y - rect_height_4_;
    int point_index_D_x = pos_x + 1;
    int point_index_D_y = pos_y + rect_height_4_;

    if (point_index_L_x < 0)
      point_index_L_x = -point_index_L_x;
    if (point_index_U_x < 0)
      point_index_U_x = -point_index_U_x;
    if (point_index_U_y < 0)
      point_index_U_y = -point_index_U_y;

    if (point_index_R_x >= width)
      point_index_R_x = width - (point_index_R_x - (width - 1));
    if (point_index_D_x >= width)
      point_index_D_x = width - (point_index_D_x - (width - 1));
    if (point_index_D_y >= height)
      point_index_D_y = height - (point_index_D_y - (height - 1));

    const int start_x_L = pos_x - rect_width_2_;
    const int start_y_L = pos_y - rect_height_4_;
    const int end_x_L = start_x_L + rect_width_2_;
    const int end_y_L = start_y_L + rect_height_2_;

    const int start_x_R = pos_x + 1;
    const int start_y_R = pos_y - rect_height_4_;
    const int end_x_R = start_x_R + rect_width_2_;
    const int end_y_R = start_y_R + rect_height_2_;

    const int start_x_U = pos_x - rect_width_4_;
    const int start_y_U = pos_y - rect_height_2_;
    const int end_x_U = start_x_U + rect_width_2_;
    const int end_y_U = start_y_U + rect_height_2_;

    const int start_x_D = pos_x - rect_width_4_;
    const int start_y_D = pos_y + 1;
    const int end_x_D = start_x_D + rect_width_2_;
    const int end_y_D = start_y_D + rect_height_2_;

    unsigned count_L_z = 0;
    unsigned count_R_z = 0;
    unsigned count_U_z = 0;
    unsigned count_D_z = 0;

    sumArea<unsigned>(start_x_L, start_y_L, end_x_L, end_y_L, width, height,
                      boost::bind(&IntegralImage2D<float, 1>::getFiniteElementsCountSE, &integral_image_depth_, _1, _2, _3, _4),
                      count_L_z);
    sumArea<unsigned>(start_x_R, start_y_R, end_x_R, end_y_R, width, height,
                      boost::bind(&IntegralImage2D<float, 1>::getFiniteElementsCountSE, &integral_image_depth_, _1, _2, _3, _4),
                      count_R_z);
    sumArea<unsigned>(start_x_U, start_y_U, end_x_U, end_y_U, width, height,
                      boost::bind(&IntegralImage2D<float, 1>::getFiniteElementsCountSE, &integral_image_depth_, _1, _2, _3, _4),
                      count_U_z);
    sumArea<unsigned>(start_x_D, start_y_D, end_x_D, end_y_D, width, height,
                      boost::bind(&IntegralImage2D<float, 1>::getFiniteElementsCountSE, &integral_image_depth_, _1, _2, _3, _4),
                      count_D_z);

    if (count_L_z == 0 || count_R_z == 0 || count_U_z == 0 || count_D_z == 0)
    {
      normal.normal_x = normal.normal_y = normal.normal_z = normal.curvature = bad_point;
      return;
    }

    float mean_L_z = 0;
    float mean_R_z = 0;
    float mean_U_z = 0;
    float mean_D_z = 0;

    sumArea<float>(start_x_L, start_y_L, end_x_L, end_y_L, width, height,
                   boost::bind(&IntegralImage2D<float, 1>::getFirstOrderSumSE, &integral_image_depth_, _1, _2, _3, _4),
                   mean_L_z);
    sumArea<float>(start_x_R, start_y_R, end_x_R, end_y_R, width, height,
                   boost::bind(&IntegralImage2D<float, 1>::getFirstOrderSumSE, &integral_image_depth_, _1, _2, _3, _4),
                   mean_R_z);
    sumArea<float>(start_x_U, start_y_U, end_x_U, end_y_U, width, height,
                   boost::bind(&IntegralImage2D<float, 1>::getFirstOrderSumSE, &integral_image_depth_, _1, _2, _3, _4),
                   mean_U_z);
    sumArea<float>(start_x_D, start_y_D, end_x_D, end_y_D, width, height,
                   boost::bind(&IntegralImage2D<float, 1>::getFirstOrderSumSE, &integral_image_depth_, _1, _2, _3, _4),
                   mean_D_z);

    mean_L_z /= float (count_L_z);
    mean_R_z /= float (count_R_z);
    mean_U_z /= float (count_U_z);
    mean_D_z /= float (count_D_z);

    PointInT pointL = input_->points[point_index_L_y * width + point_index_L_x];
    PointInT pointR = input_->points[point_index_R_y * width + point_index_R_x];
    PointInT pointU = input_->points[point_index_U_y * width + point_index_U_x];
    PointInT pointD = input_->points[point_index_D_y * width + point_index_D_x];

    const float mean_x_z = mean_R_z - mean_L_z;
    const float mean_y_z = mean_D_z - mean_U_z;

    const float mean_x_x = pointR.x - pointL.x;
    const float mean_x_y = pointR.y - pointL.y;
    const float mean_y_x = pointD.x - pointU.x;
    const float mean_y_y = pointD.y - pointU.y;

    float normal_x = mean_x_y * mean_y_z - mean_x_z * mean_y_y;
    float normal_y = mean_x_z * mean_y_x - mean_x_x * mean_y_z;
    float normal_z = mean_x_x * mean_y_y - mean_x_y * mean_y_x;

    const float normal_length = (normal_x * normal_x + normal_y * normal_y + normal_z * normal_z);

    if (normal_length == 0.0f)
    {
      normal.getNormalVector3fMap ().setConstant (bad_point);
      normal.curvature = bad_point;
      return;
    }

    flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, normal_x, normal_y, normal_z);

    const float scale = 1.0f / sqrtf (normal_length);

    normal.normal_x = normal_x * scale;
    normal.normal_y = normal_y * scale;
    normal.normal_z = normal_z * scale;
    normal.curvature = bad_point;
    return;
  }
  // ========================================================
  else if (normal_estimation_method_ == SIMPLE_3D_GRADIENT)
  {
    PCL_THROW_EXCEPTION (PCLException, "BORDER_POLICY_MIRROR not supported for normal estimation method SIMPLE_3D_GRADIENT");
  }

  normal.getNormalVector3fMap ().setConstant (bad_point);
  normal.curvature = bad_point;
  return;
}
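// ---------------------------------------------------------------------------
// Note / illustrative sketch (not part of this file): the mirror-border code
// path above is selected through the estimator's border policy; otherwise the
// computePointNormal () overload below handles the point. Assuming the
// standard setBorderPolicy () setter of pcl::IntegralImageNormalEstimation:
//
//   pcl::IntegralImageNormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
//   ne.setBorderPolicy (ne.BORDER_POLICY_MIRROR);          // mirror samples that fall outside the image
//   ne.setNormalEstimationMethod (ne.COVARIANCE_MATRIX);   // SIMPLE_3D_GRADIENT is rejected above
// ---------------------------------------------------------------------------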
template <typename PointInT, typename PointOutT> void
pcl::IntegralImageNormalEstimation<PointInT, PointOutT>::computePointNormal (
    const int pos_x, const int pos_y, const unsigned point_index, PointOutT &normal)
{
  float bad_point = std::numeric_limits<float>::quiet_NaN ();

  if (normal_estimation_method_ == COVARIANCE_MATRIX)
  {
    if (!init_covariance_matrix_)
      initCovarianceMatrixMethod ();

    unsigned count = integral_image_XYZ_.getFiniteElementsCount (pos_x - (rect_width_2_), pos_y - (rect_height_2_), rect_width_, rect_height_);

    // no valid points within the rectangular region?
    if (count == 0)
    {
      normal.normal_x = normal.normal_y = normal.normal_z = normal.curvature = bad_point;
      return;
    }

    EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
    Eigen::Vector3f center;
    typename IntegralImage2D<float, 3>::SecondOrderType so_elements;
    center = integral_image_XYZ_.getFirstOrderSum (pos_x - rect_width_2_, pos_y - rect_height_2_, rect_width_, rect_height_).template cast<float> ();
    so_elements = integral_image_XYZ_.getSecondOrderSum (pos_x - rect_width_2_, pos_y - rect_height_2_, rect_width_, rect_height_);

    covariance_matrix.coeffRef (0) = static_cast<float> (so_elements [0]);
    covariance_matrix.coeffRef (1) = covariance_matrix.coeffRef (3) = static_cast<float> (so_elements [1]);
    covariance_matrix.coeffRef (2) = covariance_matrix.coeffRef (6) = static_cast<float> (so_elements [2]);
    covariance_matrix.coeffRef (4) = static_cast<float> (so_elements [3]);
    covariance_matrix.coeffRef (5) = covariance_matrix.coeffRef (7) = static_cast<float> (so_elements [4]);
    covariance_matrix.coeffRef (8) = static_cast<float> (so_elements [5]);
    covariance_matrix -= (center * center.transpose ()) / static_cast<float> (count);

    float eigen_value;
    Eigen::Vector3f eigen_vector;
    pcl::eigen33 (covariance_matrix, eigen_value, eigen_vector);
    flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, eigen_vector[0], eigen_vector[1], eigen_vector[2]);
    normal.getNormalVector3fMap () = eigen_vector;

    // Compute the curvature surface change
    if (eigen_value > 0.0)
      normal.curvature = fabsf (eigen_value / (covariance_matrix.coeff (0) + covariance_matrix.coeff (4) + covariance_matrix.coeff (8)));
    else
      normal.curvature = 0;

    return;
  }
  else if (normal_estimation_method_ == AVERAGE_3D_GRADIENT)
  {
    if (!init_average_3d_gradient_)
      initAverage3DGradientMethod ();

    unsigned count_x = integral_image_DX_.getFiniteElementsCount (pos_x - rect_width_2_, pos_y - rect_height_2_, rect_width_, rect_height_);
    unsigned count_y = integral_image_DY_.getFiniteElementsCount (pos_x - rect_width_2_, pos_y - rect_height_2_, rect_width_, rect_height_);
    if (count_x == 0 || count_y == 0)
    {
      normal.normal_x = normal.normal_y = normal.normal_z = normal.curvature = bad_point;
      return;
    }
    Eigen::Vector3d gradient_x = integral_image_DX_.getFirstOrderSum (pos_x - rect_width_2_, pos_y - rect_height_2_, rect_width_, rect_height_);
    Eigen::Vector3d gradient_y = integral_image_DY_.getFirstOrderSum (pos_x - rect_width_2_, pos_y - rect_height_2_, rect_width_, rect_height_);

    Eigen::Vector3d normal_vector = gradient_y.cross (gradient_x);
    double normal_length = normal_vector.squaredNorm ();
    if (normal_length == 0.0f)
    {
      normal.getNormalVector3fMap ().setConstant (bad_point);
      normal.curvature = bad_point;
      return;
    }

    normal_vector /= sqrt (normal_length);

    float nx = static_cast<float> (normal_vector [0]);
    float ny = static_cast<float> (normal_vector [1]);
    float nz = static_cast<float> (normal_vector [2]);

    flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, nx, ny, nz);

    normal.normal_x = nx;
    normal.normal_y = ny;
    normal.normal_z = nz;
    normal.curvature = bad_point;
    return;
  }
  else if (normal_estimation_method_ == AVERAGE_DEPTH_CHANGE)
  {
    if (!init_depth_change_)
      initAverageDepthChangeMethod ();

    // width and height are at least 3 x 3
    unsigned count_L_z = integral_image_depth_.getFiniteElementsCount (pos_x - rect_width_2_, pos_y - rect_height_4_, rect_width_2_, rect_height_2_);
    unsigned count_R_z = integral_image_depth_.getFiniteElementsCount (pos_x + 1            , pos_y - rect_height_4_, rect_width_2_, rect_height_2_);
    unsigned count_U_z = integral_image_depth_.getFiniteElementsCount (pos_x - rect_width_4_, pos_y - rect_height_2_, rect_width_2_, rect_height_2_);
    unsigned count_D_z = integral_image_depth_.getFiniteElementsCount (pos_x - rect_width_4_, pos_y + 1             , rect_width_2_, rect_height_2_);

    if (count_L_z == 0 || count_R_z == 0 || count_U_z == 0 || count_D_z == 0)
    {
      normal.normal_x = normal.normal_y = normal.normal_z = normal.curvature = bad_point;
      return;
    }

    float mean_L_z = static_cast<float> (integral_image_depth_.getFirstOrderSum (pos_x - rect_width_2_, pos_y - rect_height_4_, rect_width_2_, rect_height_2_) / count_L_z);
    float mean_R_z = static_cast<float> (integral_image_depth_.getFirstOrderSum (pos_x + 1            , pos_y - rect_height_4_, rect_width_2_, rect_height_2_) / count_R_z);
    float mean_U_z = static_cast<float> (integral_image_depth_.getFirstOrderSum (pos_x - rect_width_4_, pos_y - rect_height_2_, rect_width_2_, rect_height_2_) / count_U_z);
    float mean_D_z = static_cast<float> (integral_image_depth_.getFirstOrderSum (pos_x - rect_width_4_, pos_y + 1             , rect_width_2_, rect_height_2_) / count_D_z);

    PointInT pointL = input_->points[point_index - rect_width_4_ - 1];
    PointInT pointR = input_->points[point_index + rect_width_4_ + 1];
    PointInT pointU = input_->points[point_index - rect_height_4_ * input_->width - 1];
    PointInT pointD = input_->points[point_index + rect_height_4_ * input_->width + 1];

    const float mean_x_z = mean_R_z - mean_L_z;
    const float mean_y_z = mean_D_z - mean_U_z;

    const float mean_x_x = pointR.x - pointL.x;
    const float mean_x_y = pointR.y - pointL.y;
    const float mean_y_x = pointD.x - pointU.x;
    const float mean_y_y = pointD.y - pointU.y;

    float normal_x = mean_x_y * mean_y_z - mean_x_z * mean_y_y;
    float normal_y = mean_x_z * mean_y_x - mean_x_x * mean_y_z;
    float normal_z = mean_x_x * mean_y_y - mean_x_y * mean_y_x;

    const float normal_length = (normal_x * normal_x + normal_y * normal_y + normal_z * normal_z);

    if (normal_length == 0.0f)
    {
      normal.getNormalVector3fMap ().setConstant (bad_point);
      normal.curvature = bad_point;
      return;
    }

    flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, normal_x, normal_y, normal_z);

    const float scale = 1.0f / sqrtf (normal_length);

    normal.normal_x = normal_x * scale;
    normal.normal_y = normal_y * scale;
    normal.normal_z = normal_z * scale;
    normal.curvature = bad_point;
    return;
  }
  else if (normal_estimation_method_ == SIMPLE_3D_GRADIENT)
  {
    if (!init_simple_3d_gradient_)
      initSimple3DGradientMethod ();

    // this method does not work if lots of NaNs are in the neighborhood of the point
    Eigen::Vector3d gradient_x = integral_image_XYZ_.getFirstOrderSum (pos_x + rect_width_2_, pos_y - rect_height_2_, 1, rect_height_) -
                                 integral_image_XYZ_.getFirstOrderSum (pos_x - rect_width_2_, pos_y - rect_height_2_, 1, rect_height_);

    Eigen::Vector3d gradient_y = integral_image_XYZ_.getFirstOrderSum (pos_x - rect_width_2_, pos_y + rect_height_2_, rect_width_, 1) -
                                 integral_image_XYZ_.getFirstOrderSum (pos_x - rect_width_2_, pos_y - rect_height_2_, rect_width_, 1);
    Eigen::Vector3d normal_vector = gradient_y.cross (gradient_x);
    double normal_length = normal_vector.squaredNorm ();
    if (normal_length == 0.0f)
    {
      normal.getNormalVector3fMap ().setConstant (bad_point);
      normal.curvature = bad_point;
      return;
    }

    normal_vector /= sqrt (normal_length);

    float nx = static_cast<float> (normal_vector [0]);
    float ny = static_cast<float> (normal_vector [1]);
    float nz = static_cast<float> (normal_vector [2]);

    flipNormalTowardsViewpoint (input_->points[point_index], vpx_, vpy_, vpz_, nx, ny, nz);

    normal.normal_x = nx;
    normal.normal_y = ny;
    normal.normal_z = nz;
    normal.curvature = bad_point;
    return;
  }

  normal.getNormalVector3fMap ().setConstant (bad_point);
  normal.curvature = bad_point;
  return;
}
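// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file): the integral-image
// estimator only works on organized clouds (e.g. RGB-D frames). The method and
// enum names below are the standard public API; the smoothing size and depth
// change factor are placeholder values, and `cloud` stands for an organized
// input cloud.
//
//   #include <pcl/features/integral_image_normal.h>
//
//   pcl::IntegralImageNormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
//   ne.setNormalEstimationMethod (ne.AVERAGE_3D_GRADIENT);
//   ne.setMaxDepthChangeFactor (0.02f);    // depth discontinuity threshold
//   ne.setNormalSmoothingSize (10.0f);     // size of the smoothing rectangle, in pixels
//   ne.setInputCloud (cloud);              // cloud must be organized (height > 1)
//
//   pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
//   ne.compute (*normals);                 // per-point work done by computePointNormal () above
// ---------------------------------------------------------------------------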