Example #1
  void ObjectTracker :: addNewDetection(const ObjectMatch& match)
  {
    m_raw_pose = match.pose()->pose3d();

    cv::Vec3f r = match.pose()->pose3d().cvRodriguesRotation();
    cv::Vec3f t = match.pose()->pose3d().cvTranslation();

    cv::Vec3f dr = r - m_estimated_pose.cvRodriguesRotation();
    cv::Vec3f dt = t - m_estimated_pose.cvTranslation();

    cv::Mat1f measurement(12,1);
    measurement << t[0], dt[0], t[1], dt[1], t[2], dt[2],
                   r[0], dr[0], r[1], dr[1], r[2], dr[2];

    cv::Mat1f new_state = m_kalman.correct(measurement);
    m_estimated_pose.resetCameraTransform();
    m_estimated_pose.applyTransformBeforeRodrigues(cv::Vec3f(new_state(0,0), new_state(2,0), new_state(4,0)),
                                                   cv::Vec3f(new_state(6,0), new_state(8,0), new_state(10,0)));
    ntk_dbg_print(m_last_pose, 1);
    ntk_dbg_print(m_estimated_pose, 1);
    ntk_dbg_print(m_raw_pose, 1);
    m_last_projected_bounding_rect = bounding_box(match.projectedBoundingRect());
    m_nframes_without_detection = 0;
    ++m_nframes_with_detection;
    m_has_updated_pose = true;
  }
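The measurement above interleaves each absolute pose component with its frame-to-frame delta into a 12x1 vector, and new_state is read back the same way (values at even indices). A minimal sketch of a cv::KalmanFilter set up with a matching 12-dimensional value/velocity state, assuming a constant-velocity model; the tracker's actual filter configuration lives elsewhere and may differ:

#include <opencv2/video/tracking.hpp>

// Hypothetical helper, not part of ObjectTracker: builds a 12-state filter
// whose state interleaves (value, velocity) pairs like the measurement above.
cv::KalmanFilter makePoseKalman(float dt = 1.0f)
{
    cv::KalmanFilter kf(12, 12, 0, CV_32F);
    cv::setIdentity(kf.transitionMatrix);
    for (int i = 0; i < 12; i += 2)
        kf.transitionMatrix.at<float>(i, i + 1) = dt;   // value += velocity * dt
    cv::setIdentity(kf.measurementMatrix);              // the full state is measured
    cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(1e-2));
    cv::setIdentity(kf.errorCovPost, cv::Scalar::all(1));
    return kf;
}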
Example #2
template <class PointT>
bool RelativePoseEstimatorICP<PointT> :: preprocessClouds()
{
    if (m_target_cloud->points.size() < 1)
    {
        ntk_dbg(1) << "Target cloud was empty";
        return false;
    }

    PointCloudConstPtr target = m_target_cloud;
    PointCloudConstPtr source = m_source_cloud;

    ntk_dbg_print(target->points.size(), 1);
    ntk_dbg_print(source->points.size(), 1);

    m_filtered_source.reset(new pcl::PointCloud<PointT>());
    m_filtered_target.reset(new pcl::PointCloud<PointT>());

    PointCloudPtr temp_source (new pcl::PointCloud<PointT>());
    PointCloudPtr temp_target (new pcl::PointCloud<PointT>());

    pcl::PassThrough<PointT> filter;
    filter.setInputCloud (source);
    filter.filter (*temp_source);

    filter.setInputCloud (target);
    filter.filter (*temp_target);

    pcl::VoxelGrid<PointT> grid;
    grid.setLeafSize (m_voxel_leaf_size, m_voxel_leaf_size, m_voxel_leaf_size);

    grid.setInputCloud(temp_source);
    grid.filter(*m_filtered_source);

    grid.setInputCloud(temp_target);
    grid.filter(*m_filtered_target);

    ntk_dbg_print(temp_source->points.size(), 1);
    ntk_dbg_print(m_filtered_source->points.size(), 1);
    ntk_dbg_print(m_filtered_target->points.size(), 1);

    if (m_filtered_source->points.size() < 1 || m_filtered_target->points.size() < 1)
    {
        ntk_dbg(1) << "Warning: no remaining points after filtering.";
        return false;
    }

    if (m_initial_pose.isValid())
    {
        Eigen::Affine3f H = toPclInvCameraTransform(m_initial_pose);
        this->transformPointCloud(*m_filtered_source, *m_filtered_source, H);
    }

    if (m_target_pose.isValid())
    {
        Eigen::Affine3f H = toPclInvCameraTransform(m_target_pose);
        this->transformPointCloud(*m_filtered_target, *m_filtered_target, H);
    }

    return true;
}
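For reference, a self-contained sketch of the same two-stage filtering on plain pcl::PointXYZ clouds; the leaf size is an illustrative value, and the field-less PassThrough is relied on only to drop non-finite points:

#include <pcl/point_types.h>
#include <pcl/filters/passthrough.h>
#include <pcl/filters/voxel_grid.h>

pcl::PointCloud<pcl::PointXYZ>::Ptr
downsample(pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud, float leaf = 0.005f)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr dense(new pcl::PointCloud<pcl::PointXYZ>());
    pcl::PassThrough<pcl::PointXYZ> pass;  // no field set: keeps only finite points
    pass.setInputCloud(cloud);
    pass.filter(*dense);

    pcl::PointCloud<pcl::PointXYZ>::Ptr sparse(new pcl::PointCloud<pcl::PointXYZ>());
    pcl::VoxelGrid<pcl::PointXYZ> grid;    // keep one point per leaf-sized voxel
    grid.setLeafSize(leaf, leaf, leaf);
    grid.setInputCloud(dense);
    grid.filter(*sparse);
    return sparse;
}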
Example #3
  double cv_emd_distance(const std::vector<double>& h1, const std::vector<double>& h2,
                         int histogram_cycle)
  {
    int n = h1.size();

#if 0
    ntk_dbg_print(h1.size(), 1);
    ntk_dbg_print(h2.size(), 1);

    double norm1 = std::accumulate(stl_bounds(h1), 0.0);
    double norm2 = std::accumulate(stl_bounds(h2), 0.0);
    ntk_dbg_print(norm1, 1);
    ntk_dbg_print(norm2, 1);
#endif

    CvMat* sig1 = cvCreateMat(n, 3, CV_32FC1);
    CvMat* sig2 = cvCreateMat(n, 3, CV_32FC1);

    foreach_idx(i, h1)
    {
      int row = i/histogram_cycle;
      int col = i%histogram_cycle;
      cvSet2D(sig1, i, 0, cvScalar(h1[i]));
      cvSet2D(sig1, i, 1, cvScalar(row));
      cvSet2D(sig1, i, 2, cvScalar(col));

      cvSet2D(sig2, i, 0, cvScalar(h2[i]));
      cvSet2D(sig2, i, 1, cvScalar(row));
      cvSet2D(sig2, i, 2, cvScalar(col));
    }
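The listing is truncated here. A plausible continuation, assuming the usual OpenCV C API earth mover's distance call (an assumption, not the original code):

    // Assumed continuation: compute EMD between the two signatures, then free them.
    double d = cvCalcEMD2(sig1, sig2, CV_DIST_L2);
    cvReleaseMat(&sig1);
    cvReleaseMat(&sig2);
    return d;
  }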
Example #4
  FeatureIndexerKdt::MatchResults
  FeatureIndexerKdt :: findMatches(const LocatedFeature& p) const
  {
    float min_dist = std::numeric_limits<float>::max();
    float min_dist2 = std::numeric_limits<float>::max();

    const LocatedFeature* closest_id = 0;
    const LocatedFeature* closest_id2 = 0;

    std::vector<float> query;
    prepareFlannQuery(query, p);
    std::vector<int> indices(2, -1);
    std::vector<float> dists(2, 0);
    {
      QWriteLocker locker(&m_lock);
      m_flann_index->knnSearch(query, indices, dists, 2, cv::flann::SearchParams(64));
    }

    MatchResults c;
    if (indices[0] >= 0 && indices[1] >= 0)
    {
      const LocatedFeature* p1 = m_points[indices[0]];
      const LocatedFeature* p2 = m_points[indices[1]];

      // Not very effective
      if (0
          && p.location().has_depth
          && p1->location().has_depth)
      {
        double scale_ratio = (p.location().scale / p1->location().scale);
        double z_ratio = (p.location().p_image.z / p1->location().p_image.z);

        double diff = (scale_ratio * z_ratio);
        if (diff < 1) diff = 1.0 / diff;
        if (diff > 1.7) // scale does not match
        {
          // ++nb_filtered;
          if ((dists[0]/dists[1]) < 0.5)
          {
            ntk_dbg_print(diff, 1);
            ntk_dbg_print(scale_ratio, 1);
            ntk_dbg_print(z_ratio, 1);
            ntk_dbg_print(p.location().p_image.z, 1);
            ntk_dbg_print(p1->location().p_image.z, 1);
          }
          return c;
        }
      }

      dists[1] += 1; // avoid degenerate case where dist is 0.
      c.matches.push_back(Match(p1, dists[0]/dists[1]));
    }
    return c;
  }
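The last lines store dists[0]/dists[1] as the match score, i.e. the standard nearest-neighbor ratio test: a match is trusted only when the best distance is clearly smaller than the second best. A minimal sketch of that test in isolation; the 0.8 threshold is an assumption, not a value taken from this code:

// Hypothetical helper illustrating the ratio test; thresholds vary per application.
bool passesRatioTest(float best_dist, float second_best_dist, float max_ratio = 0.8f)
{
    if (second_best_dist <= 0.0f)
        return false;                       // degenerate: cannot form a ratio
    return (best_dist / second_best_dist) < max_ratio;
}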
Example #5
void MeshRenderer :: setPose(const Pose3D& pose, float* arg_near_plane, float* arg_far_plane)
{
    VertexBufferObject& vbo = m_vertex_buffer_object;
    pose.cvCameraTransform().copyTo(vbo.model_view_matrix);
    // Transpose the matrix for OpenGL column-major.
    vbo.model_view_matrix = vbo.model_view_matrix.t();

    if (!(arg_near_plane && arg_far_plane))
    {
        estimateOptimalPlanes(pose, &m_last_near_plane, &m_last_far_plane);
    }
    else
    {
        m_last_near_plane = *arg_near_plane;
        m_last_far_plane = *arg_far_plane;
    }

    m_pbuffer->makeCurrent();
    glMatrixMode (GL_MODELVIEW);
    glLoadIdentity ();
    cv::Vec3f euler_angles = pose.cvEulerRotation();
    glTranslatef(pose.cvTranslation()[0], pose.cvTranslation()[1], pose.cvTranslation()[2]);
    glRotatef(euler_angles[2]*180.0/M_PI, 0, 0, 1);
    glRotatef(euler_angles[1]*180.0/M_PI, 0, 1, 0);
    glRotatef(euler_angles[0]*180.0/M_PI, 1, 0, 0);

    glMatrixMode (GL_PROJECTION);
    glLoadIdentity ();
    double dx = pose.imageCenterX() - (m_pbuffer->width() / 2.0);
    double dy = pose.imageCenterY() - (m_pbuffer->height() / 2.0);
    glViewport(dx, -dy, m_pbuffer->width(), m_pbuffer->height());
    if (pose.isOrthographic())
    {
        ntk_dbg_print(pose.focalX()/2, 0);
        ntk_dbg_print(pose.focalY()/2, 0);
        glOrtho(-pose.focalX()/2, pose.focalX()/2, -pose.focalY()/2, pose.focalY()/2, m_last_near_plane, m_last_far_plane);
    }
    else
    {
        double fov = (180.0/M_PI) * 2.0*atan(m_pbuffer->height()/(2.0*pose.focalY()));
        // double fov2 = (180.0/M_PI) * 2.0*atan(image.cols/(2.0*pose.focalX()));
        // ntk_dbg_print(fov2, 2);
        // gluPerspective(fov2,  double(image.rows)/image.cols, near_plane, far_plane);
        gluPerspective(fov, double(m_pbuffer->width())/m_pbuffer->height(), m_last_near_plane, m_last_far_plane);
    }

    glMatrixMode (GL_MODELVIEW);
}
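The perspective branch converts the pinhole focal length into an OpenGL vertical field of view with fov = 2 * atan(h / (2 * fy)). A small numeric check of that formula with illustrative values (a 480-pixel-high buffer and fy = 525 are assumptions):

#include <cmath>
#include <cstdio>

int main()
{
    double height = 480.0;  // pbuffer height in pixels (assumed)
    double fy     = 525.0;  // vertical focal length in pixels (assumed)
    double fov    = (180.0 / M_PI) * 2.0 * std::atan(height / (2.0 * fy));
    std::printf("vertical fov: %.1f degrees\n", fov);  // roughly 49 degrees
    return 0;
}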
Example #6
bool RGBDGrabberFactory :: createOpenni2Grabbers(const ntk::RGBDGrabberFactory::Params &params, std::vector<GrabberData>& grabbers)
{
#ifdef NESTK_USE_OPENNI2
    if (!Openni2Driver::hasDll())
    {
        ntk_warn("OpenNI2: No dll found.\n");
        return false;
    }

    // Drivers dll are supposed to be next to the binaries.
    QDir prev = QDir::current();
    QDir::setCurrent(QApplication::applicationDirPath());

    ni2_driver = new Openni2Driver();

    if (!ni2_driver->isReady())
    {
        ntk_error("OpenNI2: %s\n", ni2_driver->getLastError().toUtf8().constData());
        return false;
    }

    Openni2Driver::SensorInfos sensorInfos;
    ni2_driver->getSensorInfos(sensorInfos);

    ntk_info("OpenNI2: Number of devices: %d\n", sensorInfos.size());
    ntk_dbg_print(sensorInfos.size(), 1);

    // Create grabbers.
    for (unsigned n = 0; n < sensorInfos.size(); ++n)
    {
        Openni2Grabber* grabber = new Openni2Grabber(*ni2_driver, sensorInfos[n].uri);

        if (params.high_resolution)
            grabber->setHighRgbResolution(true);

        grabber->setCustomBayerDecoding(false);
        grabber->setUseHardwareRegistration(params.hardware_registration);

        GrabberData data;
        data.grabber = grabber;
        data.type = OPENNI2;
        data.processor = createProcessor(OPENNI2);

        grabbers.push_back(data);
    }

    QDir::setCurrent(prev.absolutePath());

    return sensorInfos.size() > 0;
#else
    return false;
#endif
}
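The function saves and restores the working directory around driver creation, but the early "return false" paths skip the restore. A hedged sketch of a small RAII guard that would make the restore unconditional (a suggestion, not code from the library):

#include <QDir>
#include <QString>

// Hypothetical guard: restores the previous working directory on scope exit.
struct ScopedCurrentDir
{
    explicit ScopedCurrentDir(const QString& dir)
        : previous(QDir::current()) { QDir::setCurrent(dir); }
    ~ScopedCurrentDir() { QDir::setCurrent(previous.absolutePath()); }
    QDir previous;
};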
Example #7
void FreenectGrabber :: run()
{
    setThreadShouldExit(false);
    m_current_image.setCalibration(m_calib_data);
    m_rgbd_image.setCalibration(m_calib_data);

    m_rgbd_image.rawRgbRef() = Mat3b(FREENECT_FRAME_H, FREENECT_FRAME_W);
    m_rgbd_image.rawDepthRef() = Mat1f(FREENECT_FRAME_H, FREENECT_FRAME_W);
    m_rgbd_image.rawIntensityRef() = Mat1f(FREENECT_FRAME_H, FREENECT_FRAME_W);

    m_current_image.rawRgbRef() = Mat3b(FREENECT_FRAME_H, FREENECT_FRAME_W);
    m_current_image.rawDepthRef() = Mat1f(FREENECT_FRAME_H, FREENECT_FRAME_W);
    m_current_image.rawIntensityRef() = Mat1f(FREENECT_FRAME_H, FREENECT_FRAME_W);

    startKinect();
    int64 last_grab_time = 0;

    while (!threadShouldExit())
    {
        waitForNewEvent(-1); // Use infinite timeout in order to honor sync mode.

        while (m_depth_transmitted || m_rgb_transmitted)
            freenect_process_events(f_ctx);

        // m_current_image.rawDepth().copyTo(m_current_image.rawAmplitudeRef());
        // m_current_image.rawDepth().copyTo(m_current_image.rawIntensityRef());

        {
            int64 grab_time = ntk::Time::getMillisecondCounter();
            ntk_dbg_print(grab_time - last_grab_time, 2);
            last_grab_time = grab_time;
            QWriteLocker locker(&m_lock);
            // FIXME: ugly hack to handle the possible time
            // gaps between rgb and IR frames in dual mode.
            if (m_dual_ir_rgb)
                m_current_image.copyTo(m_rgbd_image);
            else
                m_current_image.swap(m_rgbd_image);
            m_rgb_transmitted = true;
            m_depth_transmitted = true;
        }

        if (m_dual_ir_rgb)
            setIRMode(!m_ir_mode);
        advertiseNewFrame();
#ifdef _WIN32
        // FIXME: this is to avoid GUI freezes with libfreenect on Windows.
        // See http://groups.google.com/group/openkinect/t/b1d828d108e9e69
        Sleep(1);
#endif
    }
}
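The block in the middle hands a finished frame over to consumers under a write lock, swapping buffers instead of copying when possible. A minimal sketch of that handoff pattern on plain cv::Mat buffers (the real code swaps whole RGBDImage objects):

#include <QReadWriteLock>
#include <QReadLocker>
#include <QWriteLocker>
#include <opencv2/core/core.hpp>
#include <utility>

// Hypothetical double-buffer: the producer publishes, consumers take snapshots.
struct SharedFrame
{
    void publish(cv::Mat& staging)
    {
        QWriteLocker locker(&lock);
        std::swap(staging, frame);     // cv::Mat swap is a cheap header exchange
    }

    cv::Mat snapshot() const
    {
        QReadLocker locker(&lock);
        return frame.clone();
    }

    mutable QReadWriteLock lock;
    cv::Mat frame;
};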
Example #8
  void MeshRenderer :: computeProjectionMatrix(cv::Mat4b& image, const Pose3D& pose)
  {
    double near_plane, far_plane;
    estimateOptimalPlanes(pose, &near_plane, &far_plane);
    ntk_dbg_print(near_plane, 2);
    ntk_dbg_print(far_plane, 2);
    m_last_near_plane = near_plane;
    m_last_far_plane = far_plane;

    m_pbuffer->makeCurrent();
    glMatrixMode (GL_MODELVIEW);
    glLoadIdentity ();
    cv::Vec3f euler_angles = pose.cvEulerRotation();
    glTranslatef(pose.cvTranslation()[0], pose.cvTranslation()[1], pose.cvTranslation()[2]);
    glRotatef(euler_angles[2]*180.0/M_PI, 0, 0, 1);
    glRotatef(euler_angles[1]*180.0/M_PI, 0, 1, 0);
    glRotatef(euler_angles[0]*180.0/M_PI, 1, 0, 0);

    glMatrixMode (GL_PROJECTION);
    glLoadIdentity ();
    double dx = pose.imageCenterX() - (image.cols / 2.0);
    double dy = pose.imageCenterY() - (image.rows / 2.0);
    glViewport(dx, -dy, image.cols, image.rows);
    //glViewport(0, 0, image.cols, image.rows);
    if (pose.isOrthographic())
    {
      gluOrtho2D(-pose.focalX()/2, pose.focalX()/2, -pose.focalY()/2, pose.focalY()/2);
    }
    else
    {
      double fov = (180.0/M_PI) * 2.0*atan(image.rows/(2.0*pose.focalY()));
      // double fov2 = (180.0/M_PI) * 2.0*atan(image.cols/(2.0*pose.focalX()));
      // ntk_dbg_print(fov2, 2);
      // gluPerspective(fov2,  double(image.rows)/image.cols, near_plane, far_plane);
      gluPerspective(fov, double(image.cols)/image.rows, near_plane, far_plane);
    }

    glMatrixMode (GL_MODELVIEW);
  }
Example #9
void MeshRenderer :: estimateOptimalPlanes(const Pose3D& pose, float* near_plane, float* far_plane)
{
    float min_z = std::numeric_limits<float>::max();
    float max_z = 0.01f;

    for (int i = 0; i < m_mesh->faces.size(); ++i)
    {
        const Point3f& v1 = m_mesh->vertices[m_mesh->faces[i].indices[0]];
        const Point3f& v2 = m_mesh->vertices[m_mesh->faces[i].indices[1]];
        const Point3f& v3 = m_mesh->vertices[m_mesh->faces[i].indices[2]];

        Point3f pv1 = pose.cameraTransform(v1);
        Point3f pv2 = pose.cameraTransform(v2);
        Point3f pv3 = pose.cameraTransform(v3);

        min_z = ntk::math::min(min_z,-pv1.z);
        min_z = ntk::math::min(min_z,-pv2.z);
        min_z = ntk::math::min(min_z,-pv3.z);

        max_z = ntk::math::max(max_z,-pv1.z);
        max_z = ntk::math::max(max_z,-pv2.z);
        max_z = ntk::math::max(max_z,-pv3.z);
    }

    ntk_dbg_print(min_z, 2);
    ntk_dbg_print(max_z, 2);

    if (min_z < 0)
        min_z = 0.01f;

    if (max_z < min_z)
        max_z = (min_z*2);

    *near_plane = min_z*0.9f;
    *far_plane = max_z*1.1f;
}
Example #10
    void triangulate(const Vertices& vertices, PolygonMesh& output)
    {
        const int n_vertices = vertices.vertices.size();

        if (n_vertices <= 3)
        {
            output.polygons.push_back(vertices);
            return;
        }

        std::vector<uint32_t> remaining_vertices(n_vertices);
        if (area(vertices.vertices) > 0) // clockwise?
        {
            remaining_vertices = vertices.vertices;
        }
        else
        {
            for (int v = 0; v < n_vertices; v++)
                remaining_vertices[v] = vertices.vertices[n_vertices - 1 - v];
        }

        // Avoid closed loops.
        if (remaining_vertices.front() == remaining_vertices.back())
            remaining_vertices.erase(remaining_vertices.end()-1);

        // null_iterations avoids infinite loops if the polygon is not simple.
        for (int u = remaining_vertices.size() - 1, null_iterations = 0;
                remaining_vertices.size() > 2 && null_iterations < remaining_vertices.size()*2;
                ++null_iterations, u=(u+1)%remaining_vertices.size())
        {
            int v = (u + 1) % remaining_vertices.size();
            int w = (u + 2) % remaining_vertices.size();

            if (isEar(u, v, w, remaining_vertices))
            {
                Vertices triangle;
                triangle.vertices.resize(3);
                triangle.vertices[0] = remaining_vertices[u];
                triangle.vertices[1] = remaining_vertices[v];
                triangle.vertices[2] = remaining_vertices[w];
                output.polygons.push_back(triangle);
                remaining_vertices.erase(remaining_vertices.begin()+v);
                null_iterations = 0;
            }
        }
        for (int i = 0; i < remaining_vertices.size(); ++i)
            ntk_dbg_print(remaining_vertices[i], 1);
    }
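The winding check presumably relies on the signed (shoelace) area of the projected polygon. A minimal sketch of that computation; cv::Point2f stands in for whatever 2D projection area() actually uses, and the sign convention (positive = counter-clockwise) is the usual one:

#include <vector>
#include <opencv2/core/core.hpp>

float signedArea(const std::vector<cv::Point2f>& poly)
{
    float a = 0.0f;
    for (size_t i = 0; i < poly.size(); ++i)
    {
        const cv::Point2f& p = poly[i];
        const cv::Point2f& q = poly[(i + 1) % poly.size()];
        a += p.x * q.y - q.x * p.y;    // cross product of consecutive edge vectors
    }
    return 0.5f * a;
}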
Example #11
template <class PointT>
bool RelativePoseEstimatorICP<PointT> :: estimateNewPose()
{
    bool ok = preprocessClouds();
    if (!ok)
        return false;

    if (0)
    {
        ntk::Mesh mesh1, mesh2;
        pointCloudToMesh(mesh1, *m_filtered_source);
        pointCloudToMesh(mesh2, *m_filtered_target);
        mesh1.saveToPlyFile("debug_mesh_source.ply");
        mesh2.saveToPlyFile("debug_mesh_target.ply");
    }

    // The source cloud should be smaller than the target one.
    bool swap_source_and_target = false;
    if (m_filtered_source->points.size() > 2.0 * m_filtered_target->points.size())
    {
        swap_source_and_target = true;
        std::swap(m_filtered_source, m_filtered_target);
    }

    Pose3D relative_pose;
    PointCloudType cloud_reg;
    ok = computeRegistration(relative_pose, m_filtered_source, m_filtered_target, cloud_reg);

    if (!ok)
        return false;

    if (swap_source_and_target)
        relative_pose.invert();

    ntk_dbg_print(relative_pose.cvTranslation(), 1);

#if 0
    m_estimated_pose = m_target_pose;
    m_estimated_pose.applyTransformBefore(relative_pose);
    m_estimated_pose.applyTransformAfter(m_initial_pose);
#endif
    // Be careful. Poses are actually inverse camera transform in 3D space.
    // inverse(newpose) = inverse(relative_pose) * inverse(target_pose)
    m_estimated_pose = m_initial_pose;
    m_estimated_pose.applyTransformBefore(relative_pose.inverted());

    return true;
}
Example #12
  void SiftObjectDetector :: initializeFindObjects()
  {
    super::initializeFindObjects();

    m_point_matches.clear();
    m_image_sift_points.clear();

    std::list<LocatedFeature*> points;

    ntk::TimeCount tc_init("initialize", 1);
    compute_feature_points(points,
                           m_data.image,
                           siftDatabase().featureType());
    tc_init.elapsedMsecs(" compute feature points: ");

    std::copy(stl_bounds(points), std::back_inserter(m_image_sift_points));
    ntk_dbg_print(m_image_sift_points.size(), 1);
    tc_init.stop();
  }
Example #13
bool RGBDGrabberFactory :: createOpenniGrabbers(const ntk::RGBDGrabberFactory::Params &params, std::vector<GrabberData>& grabbers)
{
#ifdef NESTK_USE_OPENNI
    if (!OpenniDriver::hasDll())
    {
        ntk_warn("No OpenNI dll found.\n");
        return false;
    }

    // Config dir is supposed to be next to the binaries.
    QDir prev = QDir::current();
    QDir::setCurrent(QApplication::applicationDirPath());

    ni_driver = new OpenniDriver();

    ntk_info("Number of Openni devices: %d\n", ni_driver->numDevices());
    ntk_dbg_print(ni_driver->numDevices(), 1);

    // Create grabbers.
    for (int i = 0; i < ni_driver->numDevices(); ++i)
    {
        OpenniGrabber* k_grabber = new OpenniGrabber(*ni_driver, i);
        k_grabber->setTrackUsers(false);
        if (params.high_resolution)
            k_grabber->setHighRgbResolution(true);

        k_grabber->setCustomBayerDecoding(false);
        k_grabber->setUseHardwareRegistration(params.hardware_registration);

        GrabberData new_data;
        new_data.grabber = k_grabber;
        new_data.type = OPENNI;
        new_data.processor = createProcessor(OPENNI);
        grabbers.push_back(new_data);
    }

    QDir::setCurrent(prev.absolutePath());

    return ni_driver->numDevices() > 0;
#else
    return false;
#endif
}
Example #14
template <class PointT>
bool RelativePoseEstimatorICP<PointT> ::
computeRegistration(Pose3D& relative_pose,
                    PointCloudConstPtr source_cloud,
                    PointCloudConstPtr target_cloud,
                    PointCloudType& aligned_cloud)
{
    pcl::IterativeClosestPoint<PointT, PointT> reg;
    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-10);
    reg.setRANSACOutlierRejectionThreshold(m_ransac_outlier_threshold);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setInputCloud (source_cloud);
    reg.setInputTarget (target_cloud);
    reg.align (aligned_cloud);

    if (0)
    {
        ntk::Mesh mesh1, mesh2;
        pointCloudToMesh(mesh1, aligned_cloud);
        pointCloudToMesh(mesh2, *target_cloud);
        mesh1.saveToPlyFile("debug_icp_1.ply");
        mesh2.saveToPlyFile("debug_icp_2.ply");
    }

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

    ntk_dbg_print(reg.getFitnessScore(), 1);

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    //toOpencv(t,T);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    relative_pose.setCameraTransform(T);
    return true;
}
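The commented-out toOpencv(t,T) call suggests a conversion helper exists in the library; a sketch of what the manual loop above amounts to, as a standalone function (the name is an assumption):

#include <Eigen/Core>
#include <opencv2/core/core.hpp>

cv::Mat1f eigenToCvMat4x4(const Eigen::Matrix4f& m)
{
    cv::Mat1f out(4, 4);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            out(r, c) = m(r, c);       // element-wise copy, row by row
    return out;
}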
Example #15
void RGBDCalibration::computeInfraredIntrinsicsFromDepth()
{
    depth_intrinsics.copyTo(infrared_intrinsics);
    depth_distortion.copyTo(infrared_distortion);

    double& fx = infrared_intrinsics(0,0);
    double& fy = infrared_intrinsics(1,1);
    double& cx = infrared_intrinsics(0,2);
    double& cy = infrared_intrinsics(1,2);

    ntk_dbg(1) << cv::format("fx: %f fy: %f cx: %f cy: %f\n", fx, fy, cx, cy);

    cx = cx - infraredDepthOffsetX();
    cy = cy - infraredDepthOffsetY() + 16;
    double ratio = double(infrared_size.width) / depth_size.width;
    ntk_dbg_print(ratio, 1);
    fx *= ratio;
    fy *= ratio;
    cx *= ratio;
    cy *= ratio;

    ntk_dbg(1) << cv::format("fx: %f fy: %f cx: %f cy: %f\n", fx, fy, cx, cy);
}
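The rescaling step follows the usual rule: when the image is resized by a factor "ratio", the focal lengths and the principal point scale by the same factor. A minimal sketch of that step on a standard 3x3 OpenCV camera matrix (the helper is hypothetical):

#include <opencv2/core/core.hpp>

void scaleIntrinsics(cv::Mat1d& K, double ratio)
{
    K(0,0) *= ratio;  // fx
    K(1,1) *= ratio;  // fy
    K(0,2) *= ratio;  // cx
    K(1,2) *= ratio;  // cy
}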
Example #16
  void SiftHough :: houghpointsFromMatch(std::list<HoughPoint>& points, SiftPointMatchConstPtr match)
  {
    ntk_assert(match, "Null pointer.");
    const SiftPointTransform& point_transform = match->pointTransform();

    std::vector<unsigned char> orientation_bins(2);
    std::vector<int> scale_bins(2);
    std::vector<int> x_bins(2);
    std::vector<int> y_bins(2);

    // orientation bins
    {
      unsigned max_bins = (unsigned) (std::floor((2.0*M_PI)/m_params.delta_orientation)-1);
      double o = point_transform.orientation / m_params.delta_orientation;
      orientation_bins[0] = (unsigned) std::floor(o);
      if ((o-orientation_bins[0]) < 0.5)
      {
        if (orientation_bins[0] == 0) orientation_bins[1] = max_bins;
        else orientation_bins[1] = orientation_bins[0] - 1;
      }
      else
      {
        if (orientation_bins[0] == max_bins) orientation_bins[1] = 0;
        else orientation_bins[1] = orientation_bins[0] + 1;
      }
      ntk_dbg_print(point_transform.orientation, 2);
      ntk_dbg_print(o, 2);
      ntk_dbg_print(orientation_bins[0], 2);
      ntk_dbg_print(orientation_bins[1], 2);
    }

    // scale bins
    {
      double s = log(point_transform.scale)/log(2.0);
      scale_bins[0] = (int)floor(s);
      int s2;
      if (s > scale_bins[0]+0.5) s2 = scale_bins[0] + 1;
      else s2 = scale_bins[0] - 1;
      scale_bins[1] = s2;
    }

    const VisualObjectView& model = match->pose()->modelView();
    cv::Rect_<float> model_bbox = model.boundingRect();
    const ObjectPose& model_pose = *match->pose();

    // Vote for 16 bins in most cases.
    for (unsigned s = 0; s < scale_bins.size(); ++s)
    for (unsigned o = 0; o < orientation_bins.size(); ++o)
    {
      HoughPoint p;
      p.object_id = model.id();
      p.orientation = orientation_bins[o];
      p.scale = scale_bins[s];

      double actual_scale = pow(2.0, p.scale);
      ntk_dbg_print(actual_scale, 2);
      // double max_dim = (model_bbox.width()*actual_scale + model_bbox.height()*actual_scale) / 2.0;
      double max_dim = std::max(model_bbox.width*actual_scale, model_bbox.height*actual_scale);
      max_dim *= m_params.delta_location;
      double x = model_pose.projectedCenter().x / max_dim;
      double y = model_pose.projectedCenter().y / max_dim;
      x_bins[0] = (int)floor(x);
      x_bins[1] = x_bins[0] + ((x-x_bins[0]) < 0.5 ? -1 : 1);
      y_bins[0] = (int)floor(y);
      y_bins[1] = y_bins[0] + ((y-y_bins[0]) < 0.5 ? -1 : 1);
      for (unsigned y = 0; y < y_bins.size(); ++y)
      for (unsigned x = 0; x < x_bins.size(); ++x)
      {
        p.x = x_bins[x];
        p.y = y_bins[y];
        points.push_back(p);
      }
    }
  }
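The x/y location bins use a "vote for the two nearest bins" scheme: the integer bin containing the value plus whichever neighbor it is closest to. A minimal sketch of that computation, mirroring the x_bins/y_bins lines above:

#include <cmath>
#include <utility>

// Hypothetical helper: returns the containing bin and its nearest neighbor.
std::pair<int, int> twoNearestBins(double value)
{
    int b0 = static_cast<int>(std::floor(value));
    int b1 = b0 + ((value - b0) < 0.5 ? -1 : 1);
    return std::make_pair(b0, b1);
}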
Example #17
template <class PointT>
bool RelativePoseEstimatorICPWithNormals<PointT> ::
computeRegistration(Pose3D& relative_pose,
                    PointCloudConstPtr source_cloud,
                    PointCloudConstPtr target_cloud,
                    PointCloudType& aligned_cloud)
{
#ifndef HAVE_PCL_GREATER_THAN_1_2_0
    return super::computeRegistration(relative_pose, source_cloud, target_cloud, aligned_cloud);
#else
    pcl::IterativeClosestPoint<PointT, PointT> reg;
    typedef pcl::registration::TransformationEstimationPointToPlaneLLS<PointT, PointT> PointToPlane;
    boost::shared_ptr<PointToPlane> point_to_plane (new PointToPlane);
    reg.setTransformationEstimation (point_to_plane);

    // typedef TransformationEstimationRGBD<PointT, PointT> TransformRGBD;
    // boost::shared_ptr<TransformRGBD> transform_rgbd (new TransformRGBD);
    // reg.setTransformationEstimation (transform_rgbd);

#ifdef HAVE_PCL_GREATER_THAN_1_6_0 // rejectors are not well supported before 1.7

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorDistance> rejector_distance (new pcl::registration::CorrespondenceRejectorDistance);
    rejector_distance->setInputSource<PointT>(source_cloud);
    rejector_distance->setInputTarget<PointT>(target_cloud);
    rejector_distance->setMaximumDistance(m_distance_threshold);
    reg.addCorrespondenceRejector(rejector_distance);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorSurfaceNormal> rejector_normal (new pcl::registration::CorrespondenceRejectorSurfaceNormal);
    rejector_normal->setThreshold(cos(M_PI/4.f));
    rejector_normal->initializeDataContainer<PointT, PointT>();
    rejector_normal->setInputSource<PointT>(source_cloud);
    rejector_normal->setInputTarget<PointT>(target_cloud);
    rejector_normal->setInputNormals<PointT, PointT>(source_cloud);
    rejector_normal->setTargetNormals<PointT, PointT>(target_cloud);
    reg.addCorrespondenceRejector(rejector_normal);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorOneToOne> rejector_one_to_one (new pcl::registration::CorrespondenceRejectorOneToOne);
    reg.addCorrespondenceRejector(rejector_one_to_one);

    typedef pcl::registration::CorrespondenceRejectorSampleConsensus<PointT> RejectorConsensusT;
    boost::shared_ptr<RejectorConsensusT> rejector_ransac (new RejectorConsensusT());
    rejector_ransac->setInputSource(source_cloud);
    rejector_ransac->setInputTarget(target_cloud);
    rejector_ransac->setInlierThreshold(m_ransac_outlier_threshold);
    rejector_ransac->setMaxIterations(100);
    reg.addCorrespondenceRejector(rejector_ransac);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorVarTrimmed> rejector_var_trimmed (new pcl::registration::CorrespondenceRejectorVarTrimmed());
    rejector_var_trimmed->setInputSource<PointT>(source_cloud);
    rejector_var_trimmed->setInputTarget<PointT>(target_cloud);
    rejector_var_trimmed->setMinRatio(0.1f);
    rejector_var_trimmed->setMaxRatio(0.75f);
    reg.addCorrespondenceRejector(rejector_var_trimmed);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorTrimmed> rejector_trimmed (new pcl::registration::CorrespondenceRejectorTrimmed());
    rejector_trimmed->setMinCorrespondences(static_cast<int>(0.1f * source_cloud->size()));
    rejector_trimmed->setOverlapRatio(0.5f);
    reg.addCorrespondenceRejector(rejector_trimmed);
#endif

#if 0
    ntk::Mesh target_mesh;
    pointCloudToMesh(target_mesh, *target_cloud);
    target_mesh.saveToPlyFile("debug_target.ply");

    ntk::Mesh source_mesh;
    pointCloudToMesh(source_mesh, *source_cloud);
    source_mesh.saveToPlyFile("debug_source.ply");
#endif

    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-10);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setRANSACOutlierRejectionThreshold(m_ransac_outlier_threshold);
#ifdef HAVE_PCL_GREATER_THAN_1_6_0
    reg.setInputSource (source_cloud);
#else
    reg.setInputCloud (source_cloud);
#endif
    reg.setInputTarget (target_cloud);
    reg.align (aligned_cloud);

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

    if (0)
    {
        ntk::Mesh mesh1, mesh2;
        pointCloudToMesh(mesh1, aligned_cloud);
        pointCloudToMesh(mesh2, *target_cloud);
        mesh1.saveToPlyFile("debug_icp_1.ply");
        mesh2.saveToPlyFile("debug_icp_2.ply");
    }

#if 0
    ntk::Mesh debug_mesh;
    pointCloudToMesh(debug_mesh, aligned_cloud);
    debug_mesh.saveToPlyFile("debug_aligned.ply");
#endif

    ntk_dbg_print(reg.getFitnessScore(), 1);

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    //toOpencv(t,T);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    relative_pose.setCameraTransform(T);
#endif
    return true;
}
bool RelativePoseEstimatorICP :: estimateNewPose(const RGBDImage& image)
{
    if (m_ref_cloud.points.size() < 1)
    {
        ntk_dbg(1) << "Reference cloud was empty";
        return false;
    }

    PointCloud<PointXYZ>::Ptr target = m_ref_cloud.makeShared();
    PointCloud<PointXYZ>::Ptr source (new PointCloud<PointXYZ>());
    rgbdImageToPointCloud(*source, image);

    PointCloud<PointXYZ>::Ptr filtered_source (new PointCloud<PointXYZ>());
    PointCloud<PointXYZ>::Ptr filtered_target (new PointCloud<PointXYZ>());

    pcl::VoxelGrid<pcl::PointXYZ> grid;
    grid.setLeafSize (m_voxel_leaf_size, m_voxel_leaf_size, m_voxel_leaf_size);

    grid.setInputCloud(source);
    grid.filter(*filtered_source);

    grid.setInputCloud(target);
    grid.filter(*filtered_target);

    PointCloud<PointXYZ> cloud_reg;
    IterativeClosestPoint<PointXYZ, PointXYZ> reg;
    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-5);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setInputCloud (filtered_source);
    reg.setInputTarget (filtered_target);
    reg.align (cloud_reg);

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    //toOpencv(t,T);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    Pose3D icp_pose;
    icp_pose.setCameraTransform(T);
    ntk_dbg_print(icp_pose.cvTranslation(), 1);

    // Pose3D stores the transformation from 3D space to image.
    // So we have to invert everything so that unprojecting from
    // image to 3D gives the right transformation.
    // H2' = ICP * H1'
    m_current_pose.resetCameraTransform();
    m_current_pose = *image.calibration()->depth_pose;
    m_current_pose.invert();
    m_current_pose.applyTransformAfter(icp_pose);
    m_current_pose.invert();
    return true;
}