Code Example #1
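Multi-camera alignment block: each additional device is registered against the first (reference) camera with normal-based ICP, and the refined extrinsics are broadcast as calibration-parameter events.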
void CalibratorBlock::calibrateWithICP(FrameVectorVectorConstPtr frames_)
{
    FrameVectorConstPtr frames = frames_->frames[0];
    if (frames->images.size() < 2)
    {
        ntk_dbg(0) << "Only one device, no need for alignment!";
        return;
    }

    // Compute normals for every image that carries calibration data.
    std::vector<RGBDImage> images (frames->images.size());
    OpenniRGBDProcessor processor;
    foreach_idx(i, images)
    {
        if (!frames->images[i]->calibration())
            continue;
        frames->images[i]->copyTo(images[i]);
        processor.computeNormalsPCL(images[i]);
    }

    if (!images[0].calibration())
    {
        ntk_dbg(0) << "The first camera " << frames->images[0]->cameraSerial()
                   << " does not have calibration. It can´t be the reference, returning.";
        return;
    }

    for (size_t i = 1; i < images.size(); ++i)
    {
        if (!images[i].calibration())
        {
            ntk_dbg(0) << "Warning: camera " << images[i].cameraSerial() << " does not have calibration.";
            continue;
        }

        RelativePoseEstimatorICPWithNormals<pcl::PointNormal> estimator;
        estimator.setDistanceThreshold(0.05);
        estimator.setVoxelSize(0.01);
        estimator.setTargetImage(images[0]);
        estimator.setSourceImage(images[i]);
        bool ok = estimator.estimateNewPose();
        if (!ok)
            continue;

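        // Compose the ICP correction with the stored depth pose:
        // applyTransformBefore applies the estimated source-to-target
        // transform first, yielding the refined extrinsics for this camera.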
        Pose3D new_pose = *images[i].calibration()->depth_pose;
        new_pose.applyTransformBefore(estimator.estimatedSourcePose());

        CalibrationParametersPtr params (new CalibrationParameters);
        params->new_t = new_pose.cvTranslation();
        params->new_r = new_pose.cvEulerRotation();
        params->camera_serial = images[i].cameraSerial();
        params->calibration = images[i].calibration();
        broadcastEvent(params);
    }
}
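
All devices are aligned pairwise against camera 0, which is why the reference device must carry a valid calibration. The 5 cm correspondence threshold and 1 cm voxel size are a trade-off: a looser threshold tolerates a rougher initial calibration, while a finer voxel grid gives a more accurate but slower registration.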
Code Example #2
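Entry point of the calibration tool: it parses the command-line options, loads an initial calibration file and a directory of view???? capture folders, detects the calibration-pattern corners in every view, then runs either the infrared or the color calibration path and writes out the resulting matrices.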
int main(int argc, char** argv)
{
    arg_base::set_help_option("--help");
    arg_parse(argc, argv);
    ntk::ntk_debug_level = 1;

    namedWindow("corners");

    if      (std::string(global::opt_pattern_type()) == "chessboard") global::pattern_type = PatternChessboard;
    else if (std::string(global::opt_pattern_type()) == "circles") global::pattern_type = PatternCircles;
    else if (std::string(global::opt_pattern_type()) == "asymcircles") global::pattern_type = PatternAsymCircles;
    else fatal_error(format("Invalid pattern type: %s\n", global::opt_pattern_type()).c_str());

    global::calibration.loadFromFile(global::opt_input_file());

    global::initial_focal_length = global::calibration.rgb_pose->focalX();

    global::images_dir = QDir(global::opt_image_directory());
    ntk_ensure(global::images_dir.exists(), (global::images_dir.absolutePath() + " is not a directory.").toLatin1());

    global::images_list = global::images_dir.entryList(QStringList("view????*"), QDir::Dirs, QDir::Name);

    OpenniRGBDProcessor processor;
    processor.setFilterFlag(RGBDProcessorFlags::ComputeMapping, true);

    std::vector<ntk::RGBDImage> images;
    loadImageList(global::images_dir, global::images_list,
                  &processor, &global::calibration, images);

    std::vector< std::vector<Point2f> > good_corners;
    std::vector< std::vector<Point2f> > all_corners;
    getCalibratedCheckerboardCorners(images,
                                     global::opt_pattern_width(), global::opt_pattern_height(),
                                     global::pattern_type,
                                     all_corners, good_corners, true,
                                     global::opt_infrared());

    if (global::opt_infrared())
        calibrateFromInfrared(images, good_corners, all_corners);
    else
        calibrateFromColor(images, good_corners, all_corners);

    writeNestkMatrix();
    return 0;
}
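
The three accepted pattern strings select the detector used downstream, presumably OpenCV's findChessboardCorners for "chessboard" and findCirclesGrid (symmetric or asymmetric) for the circle patterns.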
Code Example #3
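This method fuses a new RGB-D frame into a surfel map. Every existing surfel is projected into the incoming depth image and is then skipped (out of frame, invalid depth or normal, grazing view), removed (incompatible normal or depth with too few supporting views), or merged with the new measurement; surfels whose merged position lands in a different spatial cell are collected and re-inserted at the end.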
bool SurfelsRGBDModeler :: addNewView(const RGBDImage& image_, Pose3D& depth_pose)
{
    ntk::TimeCount tc("SurfelsRGBDModeler::addNewView", 1);
    const float max_camera_normal_angle = ntk::deg_to_rad(90);

    RGBDImage image;
    image_.copyTo(image);
    if (!image_.normal().data)
    {
        OpenniRGBDProcessor processor;
        processor.computeNormalsPCL(image);
    }

    Pose3D rgb_pose = depth_pose;
    rgb_pose.toRightCamera(image.calibration()->rgb_intrinsics, image.calibration()->R, image.calibration()->T);

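    // Normals are direction vectors, so only the rotational part of the
    // camera pose applies to them: build a rotation-only pose (zero
    // translation, same Euler angles as depth_pose) whose inverse maps
    // camera-space normals into world space.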
    Pose3D world_to_camera_normal_pose;
    world_to_camera_normal_pose.applyTransformBefore(cv::Vec3f(0,0,0), depth_pose.cvEulerRotation());
    Pose3D camera_to_world_normal_pose = world_to_camera_normal_pose;
    camera_to_world_normal_pose.invert();

    const Mat1f& depth_im = image.depth();
    Mat1b covered_pixels (depth_im.size());
    covered_pixels = 0;

    std::list<Surfel> surfels_to_reinsert;

    // Surfel updating.
    for (SurfelMap::iterator next_it = m_surfels.begin(); next_it != m_surfels.end(); )
    {
        SurfelMap::iterator surfel_it = next_it;
        ++next_it;

        Surfel& surfel = surfel_it->second;
        if (!surfel.enabled())
            continue;

        Point3f surfel_2d = depth_pose.projectToImage(surfel.location);
        bool surfel_deleted = false;
        int r = ntk::math::rnd(surfel_2d.y);
        int c = ntk::math::rnd(surfel_2d.x);
        int d = ntk::math::rnd(surfel_2d.z);
        if (!is_yx_in_range(depth_im, r, c)
                || !image.depthMask()(r, c)
                || !image.isValidNormal(r,c))
            continue;

        const float update_max_dist = getCompatibilityDistance(depth_im(r,c));

        Vec3f camera_normal = image.normal()(r, c);
        normalize(camera_normal);

        Vec3f world_normal = camera_to_world_normal_pose.cameraTransform(camera_normal);
        normalize(world_normal);

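        // Angle between the camera-space normal and the ray pointing back
        // at the camera; with the 90-degree threshold above, this only
        // rejects back-facing observations.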
        Vec3f eyev = camera_eye_vector(depth_pose, r, c);
        double camera_angle = acos(camera_normal.dot(-eyev));

        if (camera_angle > max_camera_normal_angle)
            continue;

        float normal_angle = acos(world_normal.dot(surfel.normal));
        // Surfels have different normals, maybe two different faces of the same object.
        if (normal_angle > (m_update_max_normal_angle*M_PI/180.0))
        {
            // Removal check. If a surfel has a different normal and is closer to the camera
            // than the new scan, remove it.
            if (surfel_2d.z < depth_im(r,c) && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            continue;
        }

        // If the existing surfel is far from the new depth value:
        // - If the existing one had a worse viewpoint and too few supporting views, remove it.
        // - Otherwise do not include the new one.
        if (std::abs(surfel_2d.z - depth_im(r,c)) > update_max_dist)
        {
            if (surfel.min_camera_angle > camera_angle && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            else
                covered_pixels(r,c) = 1;
            continue;
        }

        // Compatible surfel found.
        const float depth = depth_im(r,c) + m_global_depth_offset;

        Point3f p3d = depth_pose.unprojectFromImage(Point2f(c,r), depth);
        cv::Vec3b rgb_color = bgr_to_rgb(image.mappedRgb()(r, c));

        Surfel image_surfel;
        image_surfel.location = p3d;
        image_surfel.normal = world_normal;
        image_surfel.color = rgb_color;
        image_surfel.min_camera_angle = camera_angle;
        image_surfel.n_views = 1;
        image_surfel.radius = computeSurfelRadius(depth, camera_normal[2], depth_pose.meanFocal());
        mergeToLeftSurfel(surfel, image_surfel);

        covered_pixels(r,c) = 1;
        // Does the merged surfel now fall into a different cell of the spatial grid?
        Cell new_cell = worldToCell(surfel.location);
        if (new_cell != surfel_it->first)
        {
            surfels_to_reinsert.push_back(surfel);
            m_surfels.erase(surfel_it);
        }
    }

    foreach_const_it(it, surfels_to_reinsert, std::list<Surfel>)
    {
        Cell new_cell = worldToCell(it->location);
        m_surfels.insert(std::make_pair(new_cell, *it));
    }
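
    // NOTE: the listing is truncated here; the remainder of addNewView
    // (presumably the insertion of brand-new surfels for the pixels not
    // marked in covered_pixels) is not shown.
    return true;
}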
Code Example #4
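Depth-calibration pass: for each captured view, the pattern corners are detected in the color image, the checkerboard pose is estimated against the RGB intrinsics, the known 3D pattern points are projected into the mapped depth image, and each (raw sensor depth, reprojected true depth) pair is collected into depth_values.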
void calibrate_kinect_depth(std::vector< DepthCalibrationPoint >& depth_values)
{
    std::vector< std::vector<Point3f> > pattern_points;
    calibrationPattern(pattern_points,
                       global::opt_pattern_width(),  global::opt_pattern_height(), global::opt_square_size(),
                       global::images_list.size());

    for (int i_image = 0; i_image < global::images_list.size(); ++i_image)
    {
        // Generate depth calibration points
        QString filename = global::images_list[i_image];
        QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));
        std::string full_filename = cur_image_dir.absoluteFilePath("raw/color.png").toStdString();
        RGBDImage image;
        OpenniRGBDProcessor processor;
        processor.setFilterFlag(RGBDProcessorFlags::ComputeMapping, true);
        image.loadFromDir(cur_image_dir.absolutePath().toStdString(), &global::calibration, &processor);

        imshow_normalized("mapped depth", image.mappedDepth());
        imshow("color", image.rgb());

        std::vector<Point2f> current_view_corners;
        calibrationCorners(full_filename, "corners",
                           global::opt_pattern_width(), global::opt_pattern_height(),
                           current_view_corners, image.rgb(), 1, global::pattern_type);

        if (current_view_corners.size() != (global::opt_pattern_width()*global::opt_pattern_height()))
        {
            ntk_dbg(1) << "Corners not detected in " << cur_image_dir.absolutePath().toStdString();
            continue;
        }

        // FIXME: why do the rvecs and tvecs from calibrateCamera seem to be nonsense?
        // Calling findExtrinsics another time to get good chessboard estimations.

        cv::Mat1f H;
        estimate_checkerboard_pose(pattern_points[0],
                                   current_view_corners,
                                   global::calibration.rgb_intrinsics,
                                   H);
        Pose3D pose;
        pose.setCameraParametersFromOpencv(global::calibration.rgb_intrinsics);
        pose.setCameraTransform(H);
        ntk_dbg_print(pose, 1);

        cv::Mat3b debug_img;
        image.rgb().copyTo(debug_img);
        foreach_idx(pattern_i, pattern_points[0])
        {
            Point3f p = pose.projectToImage(pattern_points[0][pattern_i]);
            ntk_dbg_print(p, 1);
            float kinect_raw = image.mappedDepth()(p.y, p.x);
            ntk_dbg_print(kinect_raw, 1);
            if (kinect_raw < 1e-5) continue;
            float err = kinect_raw-p.z;
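            // Overlay the signed depth error, in millimeters, at the
            // reprojected pattern point.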
            cv::putText(debug_img, format("%d", (int)(err*1000)), Point(p.x, p.y), CV_FONT_NORMAL, 0.4, Scalar(255,0,0));
            ntk_dbg_print(pattern_points[0][pattern_i], 1);
            ntk_dbg_print(p, 1);
            ntk_dbg_print(kinect_raw, 1);
            depth_values.push_back(DepthCalibrationPoint(kinect_raw, p.z));
        }
        imshow("errors", debug_img);
        cv::waitKey(0);
    }
}