template <class PointT>
bool RelativePoseEstimatorICP<PointT> :: preprocessClouds()
{
    if (m_target_cloud->points.size() < 1)
    {
        ntk_dbg(1) << "Target cloud was empty";
        return false;
    }

    PointCloudConstPtr target = m_target_cloud;
    PointCloudConstPtr source = m_source_cloud;

    ntk_dbg_print(target->points.size(), 1);
    ntk_dbg_print(source->points.size(), 1);

    m_filtered_source.reset(new pcl::PointCloud<PointT>());
    m_filtered_target.reset(new pcl::PointCloud<PointT>());

    PointCloudPtr temp_source (new pcl::PointCloud<PointT>());
    PointCloudPtr temp_target (new pcl::PointCloud<PointT>());

    // PassThrough with no filter field configured only strips non-finite (NaN) points.
    pcl::PassThrough<PointT> filter;
    filter.setInputCloud (source);
    filter.filter (*temp_source);

    filter.setInputCloud (target);
    filter.filter (*temp_target);

    // Downsample both clouds to a uniform density before ICP.
    pcl::VoxelGrid<PointT> grid;
    grid.setLeafSize (m_voxel_leaf_size, m_voxel_leaf_size, m_voxel_leaf_size);

    grid.setInputCloud(temp_source);
    grid.filter(*m_filtered_source);

    grid.setInputCloud(temp_target);
    grid.filter(*m_filtered_target);

    ntk_dbg_print(temp_source->points.size(), 1);
    ntk_dbg_print(m_filtered_source->points.size(), 1);
    ntk_dbg_print(m_filtered_target->points.size(), 1);

    if (m_filtered_source->points.size() < 1 || m_filtered_target->points.size() < 1)
    {
        ntk_dbg(1) << "Warning: no remaining points after filtering.";
        return false;
    }

    // Bring both clouds into a common frame using any valid pose estimates.
    if (m_initial_pose.isValid())
    {
        Eigen::Affine3f H = toPclInvCameraTransform(m_initial_pose);
        this->transformPointCloud(*m_filtered_source, *m_filtered_source, H);
    }

    if (m_target_pose.isValid())
    {
        Eigen::Affine3f H = toPclInvCameraTransform(m_target_pose);
        this->transformPointCloud(*m_filtered_target, *m_filtered_target, H);
    }

    return true;
}
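// Side note on the PassThrough stage above: with no filter field configured,
// pcl::PassThrough only strips non-finite (NaN) points. If an explicit depth
// clip were wanted as well, it could look like the hedged sketch below; the
// field name "z" and the limits are illustrative assumptions, not part of the
// original code.
#include <pcl/filters/passthrough.h>
#include <pcl/point_types.h>

void clipDepthRange(pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud,
                    pcl::PointCloud<pcl::PointXYZ>& clipped)
{
    pcl::PassThrough<pcl::PointXYZ> depth_clip;
    depth_clip.setFilterFieldName ("z");     // clip along the optical axis
    depth_clip.setFilterLimits (0.4f, 5.0f); // keep points between 0.4 m and 5 m
    depth_clip.setInputCloud (cloud);
    depth_clip.filter (clipped);
}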
bool RGBDGrabberFactory :: createSoftKineticIisuGrabbers(const ntk::RGBDGrabberFactory::Params &params, std::vector<GrabberData>& grabbers)
{
#ifdef NESTK_USE_SOFTKINETIC_IISU
    ntk_dbg(1) << "Trying softkinetic iisu backend";

    // Config dir is supposed to be next to the binaries.
    QDir prev = QDir::current();
    QDir::setCurrent(QApplication::applicationDirPath());

    SoftKineticIisuGrabber* k_grabber = new SoftKineticIisuGrabber();
    bool ok = k_grabber->connectToDevice();

    // Restore the previous working directory before returning.
    QDir::setCurrent(prev.absolutePath());

    if (!ok)
    {
        delete k_grabber;
        return false;
    }

    GrabberData new_data;
    new_data.grabber = k_grabber;
    new_data.type = SOFTKINETIC_IISU;
    grabbers.push_back(new_data);
    return true;
#else
    ntk_dbg(1) << "No support for softkinetic iisu, skipping.";
    return false;
#endif
}
bool FreenectGrabber :: connectToDevice()
{
    if (m_connected)
        return true;

    if (freenect_init(&f_ctx, NULL) < 0)
    {
        ntk_dbg(0) << "freenect_init() failed";
        return false;
    }
    ntk_dbg(0) << "Connecting to device: " << m_device_id;
    if (freenect_open_device(f_ctx, &f_dev, m_device_id) < 0)
    {
        ntk_dbg(0) << "freenect_open_device() failed";
        freenect_shutdown(f_ctx);
        return false;
    }

    freenect_set_user(f_dev, this);

    freenect_set_depth_callback(f_dev, kinect_depth_db);
    freenect_set_video_callback(f_dev, kinect_video_db);

    this->setIRMode(m_ir_mode);
    m_connected = true;
    return true;
}
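// Hedged usage sketch: after connectToDevice() succeeds, a typical libfreenect
// loop starts both streams and pumps USB events so the callbacks registered
// above (kinect_depth_db / kinect_video_db) actually fire. This helper and the
// m_should_exit flag are illustrative, not members of the original class.
void FreenectGrabber :: pumpEvents()
{
    freenect_start_depth(f_dev);
    freenect_start_video(f_dev);
    while (!m_should_exit && freenect_process_events(f_ctx) >= 0)
    {
        // Depth and video callbacks are invoked from inside
        // freenect_process_events().
    }
    freenect_stop_depth(f_dev);
    freenect_stop_video(f_dev);
}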
 void FeatureIndexer :: loadOrBuild()
 {
   if (!loadFromDisk())
   {
     ntk_dbg(1) << "Could not load simple sift database indexed, rebuilding.";
     rebuild();
     saveToDisk();
   }
   else
   {
     ntk_dbg(1) << "FeatureIndexer reloaded from: "
         << m_db.directory() + "/" + getIndexerFilename();
   }
 }
    bool isEar(int u, int v, int w, const std::vector<uint32_t>& vertices)
    {
        PointXY p_u = toPointXY(points_.points[vertices[u]]);
        PointXY p_v = toPointXY(points_.points[vertices[v]]);
        PointXY p_w = toPointXY(points_.points[vertices[w]]);

        // Avoid flat triangles.
        // FIXME: what happens if all the triangles are flat in the X-Y axis?
        const float eps = 1e-15;
        PointXY p_uv = difference(p_v, p_u);
        PointXY p_uw = difference(p_w, p_u);
        if (crossProduct(p_uv, p_uw) < eps)
        {
            ntk_dbg(1) << cv::format("FLAT: (%d, %d, %d)", u, v, w);
            return false;
        }

        // Check if any other vertex is inside the triangle.
        for (size_t k = 0; k < vertices.size(); ++k)
        {
            if ((k == u) || (k == v) || (k == w))
                continue;
            PointXY p = toPointXY(points_.points[vertices[k]]);
            if (isInsideTriangle(p_u, p_v, p_w, p))
                return false;
        }
        return true;
    }
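    // For reference, minimal sketches of the 2D helpers isEar() relies on.
    // The real implementations live elsewhere in the triangulation code; these
    // assumed versions only pin down the conventions the ear test needs.
    static PointXY difference(const PointXY& a, const PointXY& b)
    {
        PointXY r; r.x = a.x - b.x; r.y = a.y - b.y; return r;
    }

    // Z component of the cross product of two in-plane vectors: positive when
    // (a, b) form a counter-clockwise turn.
    static float crossProduct(const PointXY& a, const PointXY& b)
    {
        return a.x * b.y - a.y * b.x;
    }

    static bool isInsideTriangle(const PointXY& u, const PointXY& v,
                                 const PointXY& w, const PointXY& p)
    {
        // p lies inside a counter-clockwise triangle (u, v, w) when it is on
        // the left of all three directed edges.
        return crossProduct(difference(v, u), difference(p, u)) >= 0 &&
               crossProduct(difference(w, v), difference(p, v)) >= 0 &&
               crossProduct(difference(u, w), difference(p, w)) >= 0;
    }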
bool OpenniGrabber :: disconnectFromDevice()
{
    QMutexLocker ni_locker(&m_ni_mutex);
    ntk_dbg(1) << format("[Kinect %x] disconnecting", this);

    m_ni_depth_generator.StopGenerating();
    m_ni_rgb_generator.StopGenerating();
    m_ni_ir_generator.StopGenerating();
    if (m_track_users)
    {
        m_ni_user_generator.StopGenerating();
        m_ni_hands_generator.StopGenerating();
        m_ni_gesture_generator.StopGenerating();
    }
    if (m_body_event_detector)
        m_body_event_detector->shutDown();

    m_ni_depth_generator.Release();
    m_ni_rgb_generator.Release();
    m_ni_ir_generator.Release();
    m_ni_user_generator.Release();
    m_ni_hands_generator.Release();
    m_ni_gesture_generator.Release();
    m_ni_device.Release();
    m_connected = false;
    return true;
}
void NiteRGBDGrabber :: check_error(const XnStatus& status, const char* what) const
{
  if (status != XN_STATUS_OK)
  {
    ntk_dbg(0) << "[ERROR] " << cv::format("%s failed: %s\n", what, xnGetStatusString(status));
    ntk_throw_exception("Error in NiteRGBDGrabber.");
  }
}
 virtual double f (vnl_vector< double > const &x)
 {
   ntk_dbg(3) << "In f";
   e.gamma = x[0];
   // The Weibull location gamma must lie strictly below the smallest sample;
   // reject infeasible values with a huge cost.
   if ((distrib.begin()->first-x[0]) <= 0)
     return FLT_MAX;
   estimate_weibull(distrib, e);
   return -e.logl;
 }
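 // Context for f() above, assuming the usual three-parameter Weibull
 // parametrization: x[0] is the location parameter gamma, and the density
 //   f(t) = (beta/alpha) * ((t - gamma)/alpha)^(beta - 1)
 //          * exp(-((t - gamma)/alpha)^beta),   for t > gamma,
 // is only defined when every sample exceeds gamma (hence the FLT_MAX guard).
 // The optimizer then minimizes the negative log-likelihood -e.logl.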
 void RGBDFrameRecorder :: setUseBinaryRaw(bool use_it)
 {
   // Binary raw requires a little-endian platform; disabling is always allowed.
   if (use_it && QSysInfo::ByteOrder != QSysInfo::LittleEndian)
   {
     ntk_dbg(0) << "WARNING: cannot save images as binary raw, platform is not little endian";
   }
   else
   {
     m_use_binary_raw = use_it;
   }
 }
// Callback: Finished calibration
void OpenniGrabber :: calibrationFinishedCallback(XnUserID nId, bool success)
{
    ntk_dbg(1) << cv::format("Calibration finished %d\n", nId);
    if (success)
    {
        ntk_dbg(1) << cv::format("Calibration complete, start tracking user %d\n", nId);
        m_ni_user_generator.GetSkeletonCap().StartTracking(nId);
        return;
    }

    ntk_dbg(1) << cv::format("Calibration failed for user %d\n", nId);
    if (m_need_pose_to_calibrate)
    {
        m_ni_user_generator.GetPoseDetectionCap().StartPoseDetection(m_calibration_pose, nId);
    }
    else
    {
        m_ni_user_generator.GetSkeletonCap().RequestCalibration(nId, TRUE);
    }
}
void RGBDCalibration::computeInfraredIntrinsicsFromDepth()
{
    depth_intrinsics.copyTo(infrared_intrinsics);
    depth_distortion.copyTo(infrared_distortion);

    double& fx = infrared_intrinsics(0,0);
    double& fy = infrared_intrinsics(1,1);
    double& cx = infrared_intrinsics(0,2);
    double& cy = infrared_intrinsics(1,2);

    ntk_dbg(1) << cv::format("fx: %f fy: %f cx: %f cy: %f\n", fx, fy, cx, cy);

    cx = cx - infraredDepthOffsetX();
    cy = cy - infraredDepthOffsetY() + 16;
    double ratio = double(infrared_size.width) / depth_size.width;
    ntk_dbg_print(ratio, 1);
    fx *= ratio;
    fy *= ratio;
    cx *= ratio;
    cy *= ratio;

    ntk_dbg(1) << cv::format("fx: %f fy: %f cx: %f cy: %f\n", fx, fy, cx, cy);
}
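// Note on the scaling above: pinhole intrinsics are expressed in pixels, so
// when the resolution changes by a factor s, the focal lengths and principal
// point scale linearly: fx' = s*fx, fy' = s*fy, cx' = s*cx, cy' = s*cy. The
// additive offsets applied before rescaling appear to compensate for the
// fixed pixel shift between the depth and infrared sensor windows.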
// Callback: New user was detected
void OpenniGrabber :: newUserCallback(XnUserID nId)
{
    ntk_dbg(1) << cv::format("New User %d\n", nId);

    if (m_ni_user_generator.GetNumberOfUsers() > m_max_num_users)
        return;

    if (m_need_pose_to_calibrate)
    {
        m_ni_user_generator.GetPoseDetectionCap().StartPoseDetection(m_calibration_pose, nId);
    }
    else
    {
        m_ni_user_generator.GetSkeletonCap().RequestCalibration(nId, TRUE);
    }
}
template <class PointT>
bool RelativePoseEstimatorICP<PointT> ::
computeRegistration(Pose3D& relative_pose,
                    PointCloudConstPtr source_cloud,
                    PointCloudConstPtr target_cloud,
                    PointCloudType& aligned_cloud)
{
    pcl::IterativeClosestPoint<PointT, PointT> reg;
    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-10);
    reg.setRANSACOutlierRejectionThreshold(m_ransac_outlier_threshold);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setInputCloud (source_cloud);
    reg.setInputTarget (target_cloud);
    reg.align (aligned_cloud);

    if (0) // Debug: dump the aligned clouds as PLY meshes.
    {
        ntk::Mesh mesh1, mesh2;
        pointCloudToMesh(mesh1, aligned_cloud);
        pointCloudToMesh(mesh2, *target_cloud);
        mesh1.saveToPlyFile("debug_icp_1.ply");
        mesh2.saveToPlyFile("debug_icp_2.ply");
    }

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

    ntk_dbg_print(reg.getFitnessScore(), 1);

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    //toOpencv(t,T);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    relative_pose.setCameraTransform(T);
    return true;
}
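// If a rough prior on the relative pose is available, ICP converges faster
// and more reliably when seeded with it. A hedged sketch of the variant
// alignment call (pcl::Registration::align accepts an initial guess); the
// PointXYZ instantiation is illustrative, the class above is templated.
#include <pcl/registration/icp.h>

static void alignWithGuess(pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ>& reg,
                           pcl::PointCloud<pcl::PointXYZ>& aligned_cloud,
                           const Eigen::Matrix4f& guess)
{
    // The guess pre-transforms the source cloud before iterating.
    reg.align (aligned_cloud, guess);
}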
void mean_empirical_distribution(EmpiricalDistribution& mean_distrib,
                                 const EmpiricalDistribution& raw_distrib,
                                 int n_values)
{
  mean_distrib.reset(raw_distrib.precision());
  const std::map<int, double>& distrib_map = raw_distrib.rawDistribution();
  if (distrib_map.size() < 2) return;
  int min_value = distrib_map.begin()->first;
  int max_value = distrib_map.rbegin()->first;
  if (min_value < 0)
  {
    ntk_dbg(0) << "Cannot estimate mean for negative values.";
    return;
  }
  std::vector<double> distrib_vector(max_value+1, 0);
  foreach_idx(i, distrib_vector)
  {
    distrib_vector[i] = 10000.0 * (raw_distrib.cdf(double(i) * raw_distrib.precision())
                                   - raw_distrib.cdf(double(i-1.0) * raw_distrib.precision()));
  }
  // ... (remainder of this function truncated in the source)
}
void ModelAcquisitionWindow::on_saveMeshButton_clicked()
{
    if (!m_controller.modelAcquisitionController()->currentImage().withDepthDataAndCalibrated())
    {
        ntk_dbg(1) << "No image already processed.";
        return;
    }


    QString filename = QFileDialog::getSaveFileName(this,
                                                    "Save model as...",
                                                    QString("object1.model"));
    QDir dir;
    dir.mkpath(filename);
    std::string model_path = filename.toStdString();
    ntk::RGBDFrameRecorder recorder (model_path);
    recorder.setSaveRgbPose(true);
    recorder.saveCurrentFrame(m_controller.modelAcquisitionController()->currentImage());
    m_controller.modelAcquisitionController()->currentMesh().saveToPlyFile((model_path + "/mesh.ply").c_str());
    m_controller.modelAcquisitionController()->currentImage().calibration()->saveToFile((model_path + "/calibration.yml").c_str());
}
void
Hub::SetMatrixImageUpdate::updateImage (Image& image)
{
    // FIXME: Depend on opencv_utils functions instead of the clumsy ImageWidget static methods.

    switch (matrix.type())
    {
    case CV_MAT_TYPE(CV_8UC3):
        ImageWidget::setImage(image, cv::Mat3b(matrix));
        break;

    case CV_MAT_TYPE(CV_8UC1):
        ImageWidget::setImage(image, cv::Mat1b(matrix));
        break;

    case CV_MAT_TYPE(CV_32FC1):
        ImageWidget::setImage(image, cv::Mat1f(matrix));
        break;

    default:
        ntk_dbg(0) << "Unsupported image type";
    }
}
// Callback: Calibration started
void OpenniGrabber :: calibrationStartedCallback(XnUserID nId)
{
    ntk_dbg(1) << cv::format("Calibration started %d\n", nId);
}
// Callback: Detected a pose
void OpenniGrabber :: userPoseDetectedCallback(XnUserID nId)
{
    ntk_dbg(1) << cv::format("User Pose Detected %d\n", nId);
    m_ni_user_generator.GetPoseDetectionCap().StopPoseDetection(nId);
    m_ni_user_generator.GetSkeletonCap().RequestCalibration(nId, true);
}
// Callback: An existing user was lost
void OpenniGrabber :: lostUserCallback(XnUserID nId)
{
    ntk_dbg(1) << cv::format("Lost User %d\n", nId);
}
void OpenniGrabber :: run()
{
    m_should_exit = false;
    m_current_image.setCalibration(m_calib_data);
    m_rgbd_image.setCalibration(m_calib_data);

    // Depth
    m_rgbd_image.rawDepthRef() = Mat1f(m_calib_data->raw_depth_size);
    m_rgbd_image.rawDepthRef() = 0.f;
    m_rgbd_image.depthRef() = m_rgbd_image.rawDepthRef();
    m_current_image.rawDepthRef() = Mat1f(m_calib_data->raw_depth_size);
    m_current_image.rawDepthRef() = 0.f;
    m_current_image.depthRef() = m_current_image.rawDepthRef();

    // Color
    if (m_has_rgb)
    {
        m_rgbd_image.rawRgbRef() = Mat3b(m_calib_data->rawRgbSize());
        m_rgbd_image.rawRgbRef() = Vec3b(0,0,0);
        m_rgbd_image.rgbRef() = m_rgbd_image.rawRgbRef();
        m_current_image.rawRgbRef() = Mat3b(m_calib_data->rawRgbSize());
        m_current_image.rawRgbRef() = Vec3b(0,0,0);
        m_current_image.rgbRef() = m_current_image.rawRgbRef();

        m_rgbd_image.rawIntensityRef() = Mat1f(m_calib_data->rawRgbSize());
        m_rgbd_image.rawIntensityRef() = 0.f;
        m_rgbd_image.intensityRef() = m_rgbd_image.rawIntensityRef();
        m_current_image.rawIntensityRef() = Mat1f(m_calib_data->rawRgbSize());
        m_current_image.rawIntensityRef() = 0.f;
        m_current_image.intensityRef() = m_current_image.rawIntensityRef();
    }

    // User tracking
    m_rgbd_image.userLabelsRef() = cv::Mat1b(m_calib_data->raw_depth_size);
    m_rgbd_image.userLabelsRef() = 0u;

    if (m_track_users)
        m_rgbd_image.setSkeletonData(new Skeleton());

    m_current_image.userLabelsRef() = cv::Mat1b(m_calib_data->raw_depth_size);
    m_current_image.userLabelsRef() = 0u;

    if (m_track_users)
        m_current_image.setSkeletonData(new Skeleton());

    if (m_has_rgb)
    {
        bool mapping_required = m_calib_data->rawRgbSize() != m_calib_data->raw_depth_size;
        if (!mapping_required)
        {
            m_rgbd_image.mappedRgbRef() = m_rgbd_image.rawRgbRef();
            m_rgbd_image.mappedDepthRef() = m_rgbd_image.rawDepthRef();
            m_current_image.mappedRgbRef() = m_current_image.rawRgbRef();
            m_current_image.mappedDepthRef() = m_current_image.rawDepthRef();
        }
        else
        {
            m_rgbd_image.mappedRgbRef() = Mat3b(m_calib_data->raw_depth_size);
            m_rgbd_image.mappedRgbRef() = Vec3b(0,0,0);
            m_rgbd_image.mappedDepthRef() = Mat1f(m_calib_data->rawRgbSize());
            m_rgbd_image.mappedDepthRef() = 0.f;
            m_current_image.mappedRgbRef() = Mat3b(m_calib_data->rawDepthSize());
            m_current_image.mappedRgbRef() = Vec3b(0,0,0);
            m_current_image.mappedDepthRef() = Mat1f(m_calib_data->rawRgbSize());
            m_current_image.mappedDepthRef() = 0.f;
        }
    }

    m_rgbd_image.setCameraSerial(cameraSerial());
    m_current_image.setCameraSerial(cameraSerial());

    xn::SceneMetaData sceneMD;
    xn::DepthMetaData depthMD;
    xn::ImageMetaData rgbMD;
    xn::IRMetaData irMD;

    ImageBayerGRBG bayer_decoder(ImageBayerGRBG::EdgeAware);

    RGBDImage oversampled_image;
    if (m_subsampling_factor != 1)
    {
        oversampled_image.rawDepthRef().create(m_calib_data->rawDepthSize()*m_subsampling_factor);
        oversampled_image.userLabelsRef().create(oversampled_image.rawDepth().size());
    }

    while (!m_should_exit)
    {
        waitForNewEvent();
        ntk_dbg(2) << format("[%x] running iteration", this);

        {
            // OpenNI calls do not seem to be thread safe.
            QMutexLocker ni_locker(&m_ni_mutex);
            waitAndUpdateActiveGenerators();
        }

        if (m_track_users && m_body_event_detector)
            m_body_event_detector->update();

        m_ni_depth_generator.GetMetaData(depthMD);
        if (m_has_rgb)
        {
            if (m_get_infrared)
            {
                m_ni_ir_generator.GetMetaData(irMD);
            }
            else
            {
                m_ni_rgb_generator.GetMetaData(rgbMD);
            }
        }

        RGBDImage& temp_image =
                m_subsampling_factor == 1 ? m_current_image : oversampled_image;

        const XnDepthPixel* pDepth = depthMD.Data();
        ntk_assert((depthMD.XRes() == temp_image.rawDepth().cols)
                   && (depthMD.YRes() == temp_image.rawDepth().rows),
                   "Invalid image size.");

        // Convert to meters.
        const float depth_correction_factor = 1.0;
        float* raw_depth_ptr = temp_image.rawDepthRef().ptr<float>();
        for (int i = 0; i < depthMD.XRes()*depthMD.YRes(); ++i)
            raw_depth_ptr[i] = depth_correction_factor * pDepth[i]/1000.f;

        if (m_has_rgb)
        {
            if (m_get_infrared)
            {
                const XnGrayscale16Pixel* pImage = irMD.Data();
                m_current_image.rawIntensityRef().create(irMD.YRes(), irMD.XRes());
                float* raw_img_ptr = m_current_image.rawIntensityRef().ptr<float>();
                for (int i = 0; i < irMD.XRes()*irMD.YRes(); ++i)
                {
                    raw_img_ptr[i] = pImage[i];
                }
            }
            else
            {
                if (m_custom_bayer_decoding)
                {
                    uchar* raw_rgb_ptr = m_current_image.rawRgbRef().ptr<uchar>();
                    bayer_decoder.fillRGB(rgbMD,
                                          m_current_image.rawRgb().cols, m_current_image.rawRgb().rows,
                                          raw_rgb_ptr);
                    cvtColor(m_current_image.rawRgbRef(), m_current_image.rawRgbRef(), CV_RGB2BGR);
                }
                else
                {
                    const XnUInt8* pImage = rgbMD.Data();
                    ntk_assert(rgbMD.PixelFormat() == XN_PIXEL_FORMAT_RGB24, "Invalid RGB format.");
                    uchar* raw_rgb_ptr = m_current_image.rawRgbRef().ptr<uchar>();
                    for (int i = 0; i < rgbMD.XRes()*rgbMD.YRes()*3; i += 3)
                        for (int k = 0; k < 3; ++k)
                        {
                            raw_rgb_ptr[i+k] = pImage[i+(2-k)];
                        }
                }
            }
        }

        if (m_track_users)
        {
            m_ni_user_generator.GetUserPixels(0, sceneMD);
            uchar* user_mask_ptr = temp_image.userLabelsRef().ptr<uchar>();
            const XnLabel* pLabel = sceneMD.Data();
            for (int i = 0; i < sceneMD.XRes()*sceneMD.YRes(); ++i)
            {
                user_mask_ptr[i] = pLabel[i];
            }

            XnUserID user_ids[15];
            XnUInt16 num_users = 15;
            m_ni_user_generator.GetUsers(user_ids, num_users);

            // FIXME: only one user supported.
            for (int i = 0; i < num_users; ++i)
            {
                XnUserID user_id = user_ids[i];
                if (m_ni_user_generator.GetSkeletonCap().IsTracking(user_id))
                {
                    m_current_image.skeletonRef()->computeJoints(user_id, m_ni_user_generator, m_ni_depth_generator);
                    break;
                }
            }
        }

        if (m_subsampling_factor != 1)
        {
            // Cannot use interpolation here, since this would
            // spread the invalid depth values.
            cv::resize(oversampled_image.rawDepth(),
                       m_current_image.rawDepthRef(),
                       m_current_image.rawDepth().size(),
                       0, 0, INTER_NEAREST);
            // we have to repeat this, since resize can change the pointer.
            // m_current_image.depthRef() = m_current_image.rawDepthRef();
            cv::resize(oversampled_image.userLabels(),
                       m_current_image.userLabelsRef(),
                       m_current_image.userLabels().size(),
                       0, 0, INTER_NEAREST);
        }

        m_current_image.setTimestamp(getCurrentTimestamp());

        {
            QWriteLocker locker(&m_lock);
            m_current_image.swap(m_rgbd_image);
        }

        advertiseNewFrame();
    }
    ntk_dbg(1) << format("[%x] finishing", this);
}
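// Hedged consumer sketch: run() swaps m_current_image into m_rgbd_image under
// m_lock and then calls advertiseNewFrame(), so a client thread can block on
// the next frame and copy it out. waitForNextFrame(), copyImageTo() and
// isRunning() are assumed accessors on the grabber, named here for
// illustration only.
void processFrames(OpenniGrabber& grabber)
{
    ntk::RGBDImage image;
    while (grabber.isRunning())
    {
        grabber.waitForNextFrame();   // returns after advertiseNewFrame()
        grabber.copyImageTo(image);   // deep copy taken under the read lock
        // ... process image.depth(), image.rgb(), image.userLabels() ...
    }
}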
template <class PointT>
bool RelativePoseEstimatorICPWithNormals<PointT> ::
computeRegistration(Pose3D& relative_pose,
                    PointCloudConstPtr source_cloud,
                    PointCloudConstPtr target_cloud,
                    PointCloudType& aligned_cloud)
{
#ifndef HAVE_PCL_GREATER_THAN_1_2_0
    return super::computeRegistration(relative_pose, source_cloud, target_cloud, aligned_cloud);
#else
    pcl::IterativeClosestPoint<PointT, PointT> reg;
    typedef pcl::registration::TransformationEstimationPointToPlaneLLS<PointT, PointT> PointToPlane;
    boost::shared_ptr<PointToPlane> point_to_plane (new PointToPlane);
    reg.setTransformationEstimation (point_to_plane);

    // typedef TransformationEstimationRGBD<PointT, PointT> TransformRGBD;
    // boost::shared_ptr<TransformRGBD> transform_rgbd (new TransformRGBD);
    // reg.setTransformationEstimation (transform_rgbd);

#ifdef HAVE_PCL_GREATER_THAN_1_6_0 // rejectors are not well supported before 1.7

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorDistance> rejector_distance (new pcl::registration::CorrespondenceRejectorDistance);
    rejector_distance->setInputSource<PointT>(source_cloud);
    rejector_distance->setInputTarget<PointT>(target_cloud);
    rejector_distance->setMaximumDistance(m_distance_threshold);
    reg.addCorrespondenceRejector(rejector_distance);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorSurfaceNormal> rejector_normal (new pcl::registration::CorrespondenceRejectorSurfaceNormal);
    rejector_normal->setThreshold(cos(M_PI/4.f));
    rejector_normal->initializeDataContainer<PointT, PointT>();
    rejector_normal->setInputSource<PointT>(source_cloud);
    rejector_normal->setInputTarget<PointT>(target_cloud);
    rejector_normal->setInputNormals<PointT, PointT>(source_cloud);
    rejector_normal->setTargetNormals<PointT, PointT>(target_cloud);
    reg.addCorrespondenceRejector(rejector_normal);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorOneToOne> rejector_one_to_one (new pcl::registration::CorrespondenceRejectorOneToOne);
    reg.addCorrespondenceRejector(rejector_one_to_one);

    typedef pcl::registration::CorrespondenceRejectorSampleConsensus<PointT> RejectorConsensusT;
    boost::shared_ptr<RejectorConsensusT> rejector_ransac (new RejectorConsensusT());
    rejector_ransac->setInputSource(source_cloud);
    rejector_ransac->setInputTarget(target_cloud);
    rejector_ransac->setInlierThreshold(m_ransac_outlier_threshold);
    rejector_ransac->setMaxIterations(100);
    reg.addCorrespondenceRejector(rejector_ransac);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorVarTrimmed> rejector_var_trimmed (new pcl::registration::CorrespondenceRejectorVarTrimmed());
    rejector_var_trimmed->setInputSource<PointT>(source_cloud);
    rejector_var_trimmed->setInputTarget<PointT>(target_cloud);
    rejector_var_trimmed->setMinRatio(0.1f);
    rejector_var_trimmed->setMaxRatio(0.75f);
    reg.addCorrespondenceRejector(rejector_var_trimmed);

    boost::shared_ptr<pcl::registration::CorrespondenceRejectorTrimmed> rejector_trimmed (new pcl::registration::CorrespondenceRejectorTrimmed());
    rejector_trimmed->setMinCorrespondences(static_cast<int>(0.1f * source_cloud->size()));
    rejector_trimmed->setOverlapRatio(0.5f);
    reg.addCorrespondenceRejector(rejector_trimmed);
#endif

#if 0
    ntk::Mesh target_mesh;
    pointCloudToMesh(target_mesh, *target_cloud);
    target_mesh.saveToPlyFile("debug_target.ply");

    ntk::Mesh source_mesh;
    pointCloudToMesh(source_mesh, *source_cloud);
    source_mesh.saveToPlyFile("debug_source.ply");
#endif

    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-10);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setRANSACOutlierRejectionThreshold(m_ransac_outlier_threshold);
#ifdef HAVE_PCL_GREATER_THAN_1_6_0
    reg.setInputSource (source_cloud);
#else
    reg.setInputCloud (source_cloud);
#endif
    reg.setInputTarget (target_cloud);
    reg.align (aligned_cloud);

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

    if (0) // Debug: dump the aligned clouds as PLY meshes.
    {
        ntk::Mesh mesh1, mesh2;
        pointCloudToMesh(mesh1, aligned_cloud);
        pointCloudToMesh(mesh2, *target_cloud);
        mesh1.saveToPlyFile("debug_icp_1.ply");
        mesh2.saveToPlyFile("debug_icp_2.ply");
    }

#if 0
    ntk::Mesh debug_mesh;
    pointCloudToMesh(debug_mesh, aligned_cloud);
    debug_mesh.saveToPlyFile("debug_aligned.ply");
#endif

    ntk_dbg_print(reg.getFitnessScore(), 1);

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    //toOpencv(t,T);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    relative_pose.setCameraTransform(T);
#endif
    return true;
}
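// The point-to-plane estimator above needs per-point normals (e.g. a
// pcl::PointNormal cloud). A minimal sketch of how such a cloud could be
// prepared from plain XYZ input; the neighborhood size is an illustrative
// assumption.
#include <pcl/common/io.h>
#include <pcl/features/normal_3d.h>
#include <pcl/search/kdtree.h>

pcl::PointCloud<pcl::PointNormal>::Ptr
computeCloudWithNormals(pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud)
{
    pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> estimator;
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>());
    estimator.setInputCloud(cloud);
    estimator.setSearchMethod(tree);
    estimator.setKSearch(30); // illustrative neighborhood size

    pcl::PointCloud<pcl::Normal> normals;
    estimator.compute(normals);

    pcl::PointCloud<pcl::PointNormal>::Ptr output (new pcl::PointCloud<pcl::PointNormal>());
    pcl::concatenateFields(*cloud, normals, *output);
    return output;
}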
  void PlaneEstimator :: estimate(RGBDImage& image, Mat1b& plane_points)
  {
    if (!image.normal().data)
    {
      ntk_dbg(0) << "WARNING: you must activate the normal filter to get plane estimation!";
      return;
    }

    // Hand the 3D data over to the optimizer.
    const cv::Mat3f& normal_image = image.normal();
    const cv::Mat1f& distance_image = image.depth();
    cv::Mat1b& mask_image = image.depthMaskRef();
    cv::Mat1b objfilter;
    mask_image.copyTo(objfilter);
    plane_points = cv::Mat1b(image.normal().size());
    plane_points = 0;

    double min[dim];
    double max[dim];

    for (int i = 0; i < dim; i++)
    {
      max[i] = 1000.0;
      min[i] = -1000.0;
    }
    m_solver.Setup(min, max, DifferentialEvolutionSolver::stBest1Exp, 0.8, 0.75);

    // First pass: mark plane candidates whose normals project onto the reference plane normal.

    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
      if (objfilter.data && objfilter(r,c))
      {
        cv::Vec3f normal = normal_image(r,c);
        double prod = normal.dot(m_ref_plane);
        if (prod > 0.95)
          plane_points(r,c) = 255;
        else
          plane_points(r,c) = 0;
      }
    }

    // Clean up this very first surface estimate.
    dilate(plane_points,plane_points,cv::Mat());
    erode(plane_points,plane_points,cv::Mat());
    //imwrite("plane-initial.png",plane_points);

    std::vector<Point3f>& g = m_solver.planePointsRef();

    g.clear();
    
    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
      if (plane_points(r,c))
      {
        // possible table member!
        Point3f p3d = image.calibration()->depth_pose->unprojectFromImage(Point2f(c,r), distance_image(r,c));
        g.push_back(p3d);
      }
    }
    
    // Run the differential evolution solver.
    m_solver.Solve(max_generations);

    double *solution = m_solver.Solution();

    // Normalize the plane equation so its coefficients sum to one.
    float suma = solution[0] + solution[1] + solution[2] + solution[3];
    for (int i = 0; i < 4; i++)
      solution[i] = solution[i] / suma;

    // Update the estimated plane.
    m_plane.set(solution[0], solution[1], solution[2], solution[3]);


     // Final pass: re-mark plane pixels against the optimized plane normal.

     cv::Vec3f diffevolnormal(solution[0], solution[1], solution[2]);

     for (int r = 1; r < plane_points.rows-1; ++r)
     for (int c = 1; c < plane_points.cols-1; ++c)
     {
       if (objfilter.data && objfilter(r,c))
       {
         cv::Vec3f normal = normal_image(r,c);
         double prod = normal.dot(diffevolnormal);

         if (prod > 0.5)
           plane_points(r,c) = 255;
         else
           plane_points(r,c) = 0;
       }
     }

     // Clean up the differential-evolution plane-pixel mask.
     dilate(plane_points, plane_points, cv::Mat());
     erode(plane_points, plane_points, cv::Mat());
     // imwrite("plane-DE.png", plane_points);
  }
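  // For reference, the tests `normal.dot(...) > 0.95` and `> 0.5` above keep
  // pixels whose surface normal lies within acos(0.95) ~ 18 degrees,
  // respectively acos(0.5) = 60 degrees, of the candidate plane normal. A
  // small hypothetical helper (not part of the original class) makes the
  // criterion explicit:
  static inline bool isAlignedWithPlane(const cv::Vec3f& normal,
                                        const cv::Vec3f& plane_normal,
                                        float min_cosine)
  {
      // Assuming both vectors are unit length, the dot product is the cosine
      // of the angle between them.
      return normal.dot(plane_normal) > min_cosine;
  }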
void RGBDCalibration :: loadFromFile(const char* filename)
{
    QFileInfo f (filename);
    ntk_throw_exception_if(!f.exists(), "Could not find calibration file.");
    cv::FileStorage calibration_file (filename, CV_STORAGE_READ);
    readMatrix(calibration_file, "rgb_intrinsics", rgb_intrinsics);
    readMatrix(calibration_file, "rgb_distortion", rgb_distortion);
    zero_rgb_distortion = rgb_distortion(0,0) < 1e-5;
    readMatrix(calibration_file, "depth_intrinsics", depth_intrinsics);
    readMatrix(calibration_file, "depth_distortion", depth_distortion);
    zero_depth_distortion = depth_distortion(0,0) < 1e-5;

    readMatrix(calibration_file, "R", R);
    readMatrix(calibration_file, "T", T);
    cv::Mat1i size_mat;
    readMatrix(calibration_file, "rgb_size", size_mat);
    rgb_size = cv::Size(size_mat(0,0), size_mat(0,1));
    readMatrix(calibration_file, "raw_rgb_size", size_mat);
    raw_rgb_size = cv::Size(size_mat(0,0), size_mat(0,1));
    readMatrix(calibration_file, "depth_size", size_mat);
    depth_size = cv::Size(size_mat(0,0), size_mat(0,1));
    readMatrix(calibration_file, "raw_depth_size", size_mat);
    raw_depth_size = cv::Size(size_mat(0,0), size_mat(0,1));

    try {
        readMatrix(calibration_file, "R_extrinsics", R_extrinsics);
        readMatrix(calibration_file, "T_extrinsics", T_extrinsics);
    }
    catch (...)
    {
        ntk_dbg(0) << "Warning: could not load extrinsics (R_extrinsics, T_extrinsics).";
    }

    try
    {
        cv::Mat1i size_mat;
        readMatrix(calibration_file, "infrared_size", size_mat);
        infrared_size = cv::Size(size_mat(0,0), size_mat(0,1));
    }
    catch (...)
    {
        ntk_dbg(1) << "Could not read infrared image size, setting default 1280x1024";
        infrared_size = cv::Size(1280, 1024);
    }

    try
    {
        readMatrix(calibration_file, "infrared_intrinsics", infrared_intrinsics);
        readMatrix(calibration_file, "infrared_distortion", infrared_distortion);
    } catch (...)
    {
        ntk_dbg(1) << "Warning: cannot read infrared camera intrinsics, computing from depth.";
        computeInfraredIntrinsicsFromDepth();
    }

    cv::Mat1f depth_calib (1,2);
    try {
        readMatrix(calibration_file, "depth_base_and_offset", depth_calib);
        depth_baseline = depth_calib(0,0);
        depth_offset = depth_calib(0,1);
    }
    catch(...)
    {
        ntk_dbg(1) << "Warning: could not load depth offset";
    }

    try {
        cv::Mat1f depth_multiplicative_correction_factor (1,1);
        readMatrix(calibration_file, "depth_multiplicative_correction_factor", depth_multiplicative_correction_factor);
        this->depth_multiplicative_correction_factor = depth_multiplicative_correction_factor(0,0);
    }
    catch(...)
    {
        ntk_dbg(1) << "Warning: could not load depth multiplicative factor";
    }

    try {
        cv::Mat1f depth_additive_correction_factor (1,1);
        readMatrix(calibration_file, "depth_additive_correction_factor", depth_additive_correction_factor);
        this->depth_additive_correction_factor = depth_additive_correction_factor(0,0);
    }
    catch(...)
    {
        ntk_dbg(1) << "Warning: could not load depth additive correction factor";
    }

    calibration_file.release();

    depth_pose = new Pose3D();
    rgb_pose = new Pose3D();
    updatePoses();
}
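// Hedged sketch of producing a minimal calibration file that loadFromFile()
// can parse. Only the key names are taken from the reader above; all matrix
// values are illustrative placeholders.
void writeMinimalCalibration(const char* filename)
{
    cv::FileStorage fs (filename, CV_STORAGE_WRITE);
    cv::Mat1d intrinsics = (cv::Mat1d(3,3) << 525., 0., 319.5,
                                              0., 525., 239.5,
                                              0.,   0.,   1.);
    cv::Mat1d distortion = cv::Mat1d::zeros(1, 5);
    cv::Mat1i size_mat = (cv::Mat1i(1,2) << 640, 480);

    fs << "rgb_intrinsics" << intrinsics << "rgb_distortion" << distortion;
    fs << "depth_intrinsics" << intrinsics << "depth_distortion" << distortion;
    fs << "R" << cv::Mat(cv::Mat1d::eye(3,3)) << "T" << cv::Mat(cv::Mat1d::zeros(3,1));
    fs << "rgb_size" << size_mat << "raw_rgb_size" << size_mat;
    fs << "depth_size" << size_mat << "raw_depth_size" << size_mat;
    fs.release();
}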
bool RelativePoseEstimatorICP :: estimateNewPose(const RGBDImage& image)
{
    if (m_ref_cloud.points.size() < 1)
    {
        ntk_dbg(1) << "Reference cloud was empty";
        return false;
    }

    PointCloud<PointXYZ>::Ptr target = m_ref_cloud.makeShared();
    PointCloud<PointXYZ>::Ptr source (new PointCloud<PointXYZ>());
    rgbdImageToPointCloud(*source, image);

    PointCloud<PointXYZ>::Ptr filtered_source (new PointCloud<PointXYZ>());
    PointCloud<PointXYZ>::Ptr filtered_target (new PointCloud<PointXYZ>());

    pcl::VoxelGrid<pcl::PointXYZ> grid;
    grid.setLeafSize (m_voxel_leaf_size, m_voxel_leaf_size, m_voxel_leaf_size);

    grid.setInputCloud(source);
    grid.filter(*filtered_source);

    grid.setInputCloud(target);
    grid.filter(*filtered_target);

    PointCloud<PointXYZ> cloud_reg;
    IterativeClosestPoint<PointXYZ, PointXYZ> reg;
    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-5);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setInputCloud (filtered_source);
    reg.setInputTarget (filtered_target);
    reg.align (cloud_reg);

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    //toOpencv(t,T);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    Pose3D icp_pose;
    icp_pose.setCameraTransform(T);
    ntk_dbg_print(icp_pose.cvTranslation(), 1);

    // Pose3D stores the transformation from 3D space to image.
    // So we have to invert everything so that unprojecting from
    // image to 3D gives the right transformation.
    // H2' = ICP * H1'
    m_current_pose.resetCameraTransform();
    m_current_pose = *image.calibration()->depth_pose;
    m_current_pose.invert();
    m_current_pose.applyTransformAfter(icp_pose);
    m_current_pose.invert();
    return true;
}
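// Making the algebra above explicit: writing H for a world-to-image transform
// and H' = H^-1 for its camera-to-world inverse, the ICP result maps the new
// frame's camera coordinates onto the reference camera frame. Hence
//   H2' = ICP * H1'   and therefore   H2 = (ICP * H1^-1)^-1,
// which is exactly what the invert / applyTransformAfter / invert sequence
// computes.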
bool OpenniGrabber :: connectToDevice()
{
    QMutexLocker ni_locker(&m_ni_mutex);
    ntk_dbg(1) << format("[Kinect %x] connecting", this);

    XnStatus status;
    xn::NodeInfoList device_node_info_list;
    status = m_driver.niContext().EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, NULL, device_node_info_list);
    m_driver.checkXnError(status, "Cannot enumerate devices.");

    xn::NodeInfoList::Iterator nodeIt = device_node_info_list.Begin();
    for (; nodeIt != device_node_info_list.End (); ++nodeIt)
    {
        xn::NodeInfo deviceInfo = *nodeIt;
        if (m_driver.deviceInfo(m_camera_id).creation_info == deviceInfo.GetCreationInfo())
            break;
    }
    ntk_throw_exception_if(nodeIt == device_node_info_list.End (), "Cannot find device.");

    setCameraSerial(m_driver.deviceInfo(m_camera_id).serial);

    xn::NodeInfo deviceInfo = *nodeIt;
    ntk_assert(m_driver.deviceInfo(m_camera_id).creation_info == deviceInfo.GetCreationInfo(), "Inconsistent nodes!");
    status = m_driver.niContext().CreateProductionTree(deviceInfo, m_ni_device);
    m_driver.checkXnError(status, "Create Device Node");
    const XnProductionNodeDescription& description = deviceInfo.GetDescription();
    ntk_dbg(1) << format("device %d: vendor %s name %s, instance %s, serial %s",
                         m_camera_id,
                         description.strVendor, description.strName, deviceInfo.GetInstanceName(), m_driver.deviceInfo(m_camera_id).serial.c_str());
    xn::Query query;
    query.AddNeededNode(deviceInfo.GetInstanceName());

    bool is_kinect = (std::string("SensorKinect") == description.strName) || (std::string("SensorV2") == description.strName);

    status = m_ni_depth_generator.Create(m_driver.niContext(), &query);
    m_driver.checkXnError(status, "Create depth generator");
    XnMapOutputMode depth_mode;
    depth_mode.nXRes = 640;
    depth_mode.nYRes = 480;
    depth_mode.nFPS = 30;
    m_ni_depth_generator.SetMapOutputMode(depth_mode);

    status = m_driver.niContext().SetGlobalMirror(m_mirrored);
    m_driver.checkXnError(status, "Mirror");

    status = m_ni_rgb_generator.Create(m_driver.niContext(), &query);
    if (status != XN_STATUS_OK)
    {
        m_has_rgb = false;
        ntk_dbg(1) << "Warning: no color stream!";
    }

    if (m_has_rgb)
    {
        status = m_ni_rgb_generator.SetIntProperty ("Resolution", 1);
        m_driver.checkXnError(status, "Resolution");

        XnMapOutputMode rgb_mode;
        if (m_high_resolution)
        {
            rgb_mode.nFPS = 15;
            rgb_mode.nXRes = 1280;
            rgb_mode.nYRes = 1024;
        }
        else
        {
            rgb_mode.nXRes = 640;
            rgb_mode.nYRes = 480;
            rgb_mode.nFPS = 30;
        }
        status = m_ni_rgb_generator.SetMapOutputMode(rgb_mode);
        m_driver.checkXnError(status, "Set RGB mode");

        ntk_throw_exception_if(!m_ni_depth_generator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT), "Cannot register images.");
        m_ni_depth_generator.GetAlternativeViewPointCap().SetViewPoint(m_ni_rgb_generator);

        if (!is_kinect && m_ni_depth_generator.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC))
        {
            ntk_dbg(1) << "Frame Sync supported.";
            status = m_ni_depth_generator.GetFrameSyncCap ().FrameSyncWith (m_ni_rgb_generator);
            m_driver.checkXnError(status, "Set Frame Sync");
        }

        status = m_ni_ir_generator.Create(m_driver.niContext(), &query);
        m_driver.checkXnError(status, "Create infrared generator");
        XnMapOutputMode ir_mode;
        ir_mode.nFPS = 15;
        ir_mode.nXRes = 1280;
        ir_mode.nYRes = 1024;
        m_ni_ir_generator.SetMapOutputMode(ir_mode);
    }

    if (m_track_users)
    {
        status = m_ni_user_generator.Create(m_driver.niContext(), &query);
        m_driver.checkXnError(status, "Create user generator");
        status = m_ni_hands_generator.Create(m_driver.niContext(), &query);
        m_driver.checkXnError(status, "Create hands generator");
        status = m_ni_gesture_generator.Create(m_driver.niContext(), &query);
        m_driver.checkXnError(status, "Create gestures generator");
    }

    if (m_track_users)
    {
        ntk_throw_exception_if(!m_ni_user_generator.IsCapabilitySupported(XN_CAPABILITY_SKELETON),
                               "Supplied user generator doesn't support skeleton.");

        XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
        m_ni_user_generator.RegisterUserCallbacks(User_NewUser, User_LostUser, this, hUserCallbacks);
        m_ni_user_generator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, this, hCalibrationCallbacks);

        if (m_ni_user_generator.GetSkeletonCap().NeedPoseForCalibration())
        {
            m_need_pose_to_calibrate = true;
            if (!m_ni_user_generator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
                ntk_throw_exception("Pose required, but not supported\n");
            m_ni_user_generator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, this, hPoseCallbacks);
            m_ni_user_generator.GetSkeletonCap().GetCalibrationPose(m_calibration_pose);
        }

        m_ni_user_generator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

        if (m_body_event_detector)
            m_body_event_detector->initialize(m_driver.niContext(), m_ni_depth_generator);
    }

    status = m_ni_depth_generator.StartGenerating();
    m_driver.checkXnError(status, "Depth::StartGenerating");

    if (m_has_rgb)
    {
        status = m_ni_rgb_generator.StartGenerating();
        m_driver.checkXnError(status, "RGB::StartGenerating");

        if (m_custom_bayer_decoding)
        {
            // Grayscale to get raw Bayer pattern.
            status = m_ni_rgb_generator.SetIntProperty ("InputFormat", 6);
            m_driver.checkXnError(status, "Change input format");

            status = m_ni_rgb_generator.SetPixelFormat(XN_PIXEL_FORMAT_GRAYSCALE_8_BIT);
            m_driver.checkXnError(status, "Change pixel format");
        }
    }

    if (m_track_users)
    {
        status = m_ni_user_generator.StartGenerating();
        m_driver.checkXnError(status, "User::StartGenerating");

        status = m_ni_hands_generator.StartGenerating();
        m_driver.checkXnError(status, "Hands::StartGenerating");

        status = m_ni_gesture_generator.StartGenerating();
        m_driver.checkXnError(status, "Gesture::StartGenerating");
    }

    waitAndUpdateActiveGenerators();
    if (!m_calib_data)
        estimateCalibration();

    m_connected = true;
    return true;
}
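// Hedged sketch: instead of hard-coding 640x480@30fps as above, OpenNI map
// generators can be asked for their supported modes first. The signatures
// follow OpenNI 1.x as used in this file; treat the details as assumptions.
bool pickFirstSupportedMode(xn::MapGenerator& generator, XnMapOutputMode& chosen)
{
    XnUInt32 count = generator.GetSupportedMapOutputModesCount();
    if (count == 0)
        return false;
    std::vector<XnMapOutputMode> modes (count);
    if (generator.GetSupportedMapOutputModes(&modes[0], count) != XN_STATUS_OK)
        return false;
    chosen = modes[0]; // e.g. take the first advertised mode
    return true;
}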
void NiteRGBDGrabber :: initialize()
{
    xn::EnumerationErrors errors;    
//    XnStatus status = m_ni_context.InitFromXmlFile(cfg.c_str(), &errors);
    
    XnStatus status = m_ni_context.Init();
    
    if (status != XN_STATUS_OK) {
        ntk_dbg(0) << "[ERROR] " << xnGetStatusString(status);
        ntk_throw_exception("Could not initialize NITE. Check Log."
                            "Most probable reasons are device not connected or no config/ directory"
                            "in the current directory.");
    }
    
    XnLicense license;
    strcpy(license.strKey, "0KOIk2JeIBYClPWVnMoRKn5cdY4=");
    strcpy(license.strVendor, "PrimeSense");
    status = m_ni_context.AddLicense(license);
    check_error(status, "add license");
        
    // find device & set name: 
    xn::Query devQuery;
    status = devQuery.SetCreationInfo(kinect_id.c_str());
    check_error(status, "set dev query creation info");

    xn::NodeInfoList device_node_info_list;     
    status = m_ni_context.EnumerateProductionTrees(XN_NODE_TYPE_DEVICE, &devQuery, device_node_info_list, NULL); 
    check_error(status, "device enum");
    
    xn::NodeInfoList::Iterator nodeIt = device_node_info_list.Begin();
    xn::NodeInfo info(*nodeIt); 
    status = info.SetInstanceName("Cam1");    
    check_error(status, "set device name");

    status = m_ni_context.CreateProductionTree(info);
    check_error(status, "create device production tree");
    

    // enumerate depth nodes: 
    xn::Query depthQuery;
    status = depthQuery.AddNeededNode("Cam1");
    check_error(status, "set depth query needed node");

    xn::NodeInfoList depth_node_info_list;     
    status = m_ni_context.EnumerateProductionTrees(XN_NODE_TYPE_DEPTH, &depthQuery, depth_node_info_list, NULL); 
    check_error(status, "depth enum");
    nodeIt = depth_node_info_list.Begin();
    info = xn::NodeInfo(*nodeIt);
    
    status = info.SetInstanceName("Depth1");    
    check_error(status, "set depth name");
        
    status = m_ni_context.CreateProductionTree(info);
    check_error(status, "create depth production tree");
    
    status = info.GetInstance(m_ni_depth_generator);
    check_error(status, "create depth generator instance");

    XnMapOutputMode mode;
    mode.nXRes = 640;
    mode.nYRes = 480;
    mode.nFPS = 30;
    
    status = m_ni_depth_generator.SetMapOutputMode(mode);
    check_error(status, "set depth map mode");
    
    // enumerate rgb nodes: 
    xn::NodeInfoList image_node_info_list; 
    status = m_ni_context.EnumerateProductionTrees(XN_NODE_TYPE_IMAGE, &depthQuery, image_node_info_list, NULL); 
    check_error(status, "image enum");
    nodeIt = image_node_info_list.Begin();
    info = xn::NodeInfo(*nodeIt);
    
    status = info.SetInstanceName("Image1");    
    check_error(status, "set image name");
    
    status = m_ni_context.CreateProductionTree(info);
    check_error(status, "create image production tree");
    
    status = info.GetInstance(m_ni_rgb_generator);
    check_error(status, "create image generator instance");
    
    XnMapOutputMode mode2;
    mode2.nXRes = 640;
    mode2.nYRes = 480;
    mode2.nFPS = 30;
    
    status = m_ni_rgb_generator.SetMapOutputMode(mode2);
    check_error(status, "set rgb map mode");

    if (enable_skeleton_tracking) {
        // enumerate user nodes: 
        xn::NodeInfoList user_node_info_list; 
        status = m_ni_context.EnumerateProductionTrees(XN_NODE_TYPE_USER, &depthQuery, user_node_info_list, NULL); 
        check_error(status, "user enum");
        nodeIt = user_node_info_list.Begin();
        info = xn::NodeInfo(*nodeIt);
        
        status = info.SetInstanceName("User1");    
        check_error(status, "set user name");
        
        status = m_ni_context.CreateProductionTree(info);
        check_error(status, "create user production tree");
        
        status = info.GetInstance(m_ni_user_generator);
        check_error(status, "create user generator instance");   
    }
    
  if (m_high_resolution)
  {
    XnMapOutputMode rgb_mode;
    rgb_mode.nFPS = 15;
    rgb_mode.nXRes = 1280;
    rgb_mode.nYRes = 1024;
    m_ni_rgb_generator.SetMapOutputMode(rgb_mode);
  }

  if (m_custom_bayer_decoding)
  {
    // Grayscale to get raw Bayer pattern.
    status = m_ni_rgb_generator.SetIntProperty ("InputFormat", 6);
    check_error(status, "Change input format");

    status = m_ni_rgb_generator.SetPixelFormat(XN_PIXEL_FORMAT_GRAYSCALE_8_BIT);
    check_error(status, "Change pixel format");
  }

  ntk_ensure(m_ni_depth_generator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT), "Cannot register images.");
  m_ni_depth_generator.GetAlternativeViewPointCap().SetViewPoint(m_ni_rgb_generator);

    if (enable_skeleton_tracking) {
        ntk_throw_exception_if(!m_ni_user_generator.IsCapabilitySupported(XN_CAPABILITY_SKELETON),
                               "Supplied user generator doesn't support skeleton.");
        
        XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
        m_ni_user_generator.RegisterUserCallbacks(User_NewUser, User_LostUser, this, hUserCallbacks);
        m_ni_user_generator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, this, hCalibrationCallbacks);
        
        if (m_ni_user_generator.GetSkeletonCap().NeedPoseForCalibration())
        {
            m_need_pose_to_calibrate = true;
            if (!m_ni_user_generator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
                ntk_throw_exception("Pose required, but not supported\n");
            m_ni_user_generator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, this, hPoseCallbacks);
            m_ni_user_generator.GetSkeletonCap().GetCalibrationPose(m_calibration_pose);
        }
        
        m_ni_user_generator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
        
        if (m_body_event_detector)
            m_body_event_detector->initialize(m_ni_context, m_ni_depth_generator);        
    }

  status = m_ni_context.StartGeneratingAll();
  check_error(status, "StartGenerating");

  m_ni_context.WaitAndUpdateAll();
  estimateCalibration();
}
void LevenbergMarquartMinimizer::diagnoseOutcome(int debug_level) const
{
  ntk_dbg(debug_level) << cv::format("[LVMQ Status=%i NEval=%i NIter=%i ErrStartEnd=%f/%f]",
                                     m_info, m_num_function_evaluations, m_num_iterations,
                                     m_start_error, m_end_error);
}
void NiteRGBDGrabber :: initialize()
{
  const char* config_file = "config/NestkConfig.xml";

  xn::EnumerationErrors errors;
  XnStatus status = m_ni_context.InitFromXmlFile(config_file, &errors);
  if (status != XN_STATUS_OK)
  {
    ntk_dbg(0) << "[ERROR] " << xnGetStatusString(status);
    ntk_throw_exception("Could not initialize NITE. Check Log."
                        "Most probable reasons are device not connected or no config/ directory"
                        "in the current directory.");
  }

  status = m_ni_context.FindExistingNode(XN_NODE_TYPE_DEPTH, m_ni_depth_generator);
  check_error(status, "Find depth generator");

  status = m_ni_context.FindExistingNode(XN_NODE_TYPE_USER, m_ni_user_generator);
  check_error(status, "Find user generator");

  status = m_ni_context.FindExistingNode(XN_NODE_TYPE_IMAGE, m_ni_rgb_generator);
  check_error(status, "Find image generator");

  if (m_high_resolution)
  {
    XnMapOutputMode rgb_mode;
    rgb_mode.nFPS = 15;
    rgb_mode.nXRes = 1280;
    rgb_mode.nYRes = 1024;
    m_ni_rgb_generator.SetMapOutputMode(rgb_mode);
  }

  if (m_custom_bayer_decoding)
  {
    // Grayscale to get raw Bayer pattern.
    status = m_ni_rgb_generator.SetIntProperty ("InputFormat", 6);
    check_error(status, "Change input format");

    status = m_ni_rgb_generator.SetPixelFormat(XN_PIXEL_FORMAT_GRAYSCALE_8_BIT);
    check_error(status, "Change pixel format");
  }

  ntk_ensure(m_ni_depth_generator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT), "Cannot register images.");
  m_ni_depth_generator.GetAlternativeViewPointCap().SetViewPoint(m_ni_rgb_generator);

  ntk_throw_exception_if(!m_ni_user_generator.IsCapabilitySupported(XN_CAPABILITY_SKELETON),
                         "Supplied user generator doesn't support skeleton.");

  XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
  m_ni_user_generator.RegisterUserCallbacks(User_NewUser, User_LostUser, this, hUserCallbacks);
  m_ni_user_generator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, this, hCalibrationCallbacks);

  if (m_ni_user_generator.GetSkeletonCap().NeedPoseForCalibration())
  {
    m_need_pose_to_calibrate = true;
    if (!m_ni_user_generator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
      ntk_throw_exception("Pose required, but not supported\n");
    m_ni_user_generator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, this, hPoseCallbacks);
    m_ni_user_generator.GetSkeletonCap().GetCalibrationPose(m_calibration_pose);
  }

  m_ni_user_generator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

  if (m_body_event_detector)
    m_body_event_detector->initialize(m_ni_context, m_ni_depth_generator);

  status = m_ni_context.StartGeneratingAll();
  check_error(status, "StartGenerating");

  m_ni_context.WaitAndUpdateAll();
  estimateCalibration();
}