Example #1
void LabicCV::saveFrame() {
	RGBDImage tmp;
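	// Busy-wait until the Kinect grabber delivers a new RGB-D frame.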
	while (!kinect->grabRGBDImage(tmp)) {}
	dinfo << "[LabicCV] Pushed frame " << tmp.timestamp() << " to queue" << endl;
	queue.push(tmp);
	ddebug << queue << endl;
}
Example #2
void get_calibrated_kinect_corners(const QDir& image_dir,
                                   const QStringList& view_list,
                                   RGBDCalibration& calibration,
                                   RGBDProcessor& processor,
                                   std::vector< std::vector<Point2f> >& output_corners)
{
    std::vector< std::vector<Point2f> > good_corners;
    output_corners.resize(view_list.size());
    for (int i_image = 0; i_image < view_list.size(); ++i_image)
    {
        QString filename = view_list[i_image];
        QDir cur_image_dir (image_dir.absoluteFilePath(filename));

        RGBDImage image;
        image.loadFromDir(cur_image_dir.absolutePath().toStdString(), &calibration, &processor);

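        // Detect the calibration pattern corners in the color image of this view.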
        std::vector<Point2f> current_view_corners;
        calibrationCorners(filename.toStdString(), "corners",
                           global::opt_pattern_width(), global::opt_pattern_height(),
                           current_view_corners, image.rgb(), 1,
                           global::pattern_type);

        if (current_view_corners.size() == global::opt_pattern_height()*global::opt_pattern_width())
        {
            output_corners[i_image] = current_view_corners;
            show_corners(image.rgb(), current_view_corners, 1);
        }
        else
        {
            ntk_dbg(0) << "Warning: corners not detected";
            output_corners[i_image].resize(0);
        }
    }
}
Example #3
void filterColor(const RGBDImage& img, vector<Mat>& hsvs,
         Mat1b& result, cv::Point3f upleft, cv::Point2f fudge,
         int hue_from, int hue_to, 
         int sat_from, int sat_to,
         int val_from, int val_to) {
  
  const Mat1f& depth = img.depth();
  const Mat1b& mask = img.depthMask();
  const Mat& rgb = img.rgb();
  const RGBDCalibration* calib = img.calibration();

  if (!calib || upleft.y > rgb.rows || upleft.x > rgb.cols) return;

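  // Color-image region of interest covering the result grid, expanded by the fudge margins and clamped to the image bounds.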
  int wid = result.cols * STEP_SIZE, ht = result.rows * STEP_SIZE;

  cv::Range rslice(upleft.y - fudge.y, upleft.y + ht + fudge.y),
            cslice(upleft.x - fudge.x, upleft.x + wid + fudge.x);
  rslice.start = max(0, rslice.start);
  rslice.end = min(rgb.rows, rslice.end);
  cslice.start = max(0, cslice.start);
  cslice.end = min(rgb.cols, cslice.end);

  //printf("%i %i %i %i\n", rslice.start, rslice.end, cslice.start, cslice.end);
  
  if (hsvs.size() == 0) {
    Mat sliced_rgb(rgb, rslice, cslice);
    Mat hsv;
    cvtColor(sliced_rgb, hsv, CV_BGR2HSV);
    split(hsv, hsvs);
  }

  const Pose3D* rgb_pose = calib->rgb_pose;
  const Pose3D* depth_pose = calib->depth_pose;

  for (int r = 0; r < result.rows; r++)
  for (int c = 0; c < result.cols; c++) {
    int x = c * STEP_SIZE + upleft.x;
    int y = r * STEP_SIZE + upleft.y;
    if (mask(y, x) == 0) continue;
    // Calculate rgb location for this particular depth element.
    double dv = depth(y, x);
    Point3f pu = depth_pose->unprojectFromImage(Point2f(x, y), dv);
    Point3f prgb = rgb_pose->projectToImage(pu);
    int i_x = ntk::math::rnd(prgb.x) - cslice.start;
    int i_y = ntk::math::rnd(prgb.y) - rslice.start;
    if (is_yx_in_range(hsvs[0], i_y, i_x)) {
      uchar hue = hsvs[0].at<uchar>(i_y, i_x);
      uchar sat = hsvs[1].at<uchar>(i_y, i_x);
      uchar val = hsvs[2].at<uchar>(i_y, i_x);
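      // The hue interval may wrap around the circular hue axis (hue_from > hue_to), e.g. for red tones.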
      result.at<bool>(r, c) =
        (   ((hue_from > hue_to) && ((hue > hue_from) || (hue < hue_to)))
         || ((hue_from < hue_to) && ((hue > hue_from) && (hue < hue_to))))
        && sat > sat_from && sat < sat_to && val > val_from && val < val_to;
    }
  }
}
Example #4
 std::string RGBDFrameRecorder :: getNextFrameDirectory(const RGBDImage& image) const
 {
     std::string path = m_dir.absolutePath().toStdString();
     if (m_include_serial)
         path += "/" + image.cameraSerial();
     path += format("/view%04d", m_frame_index);
     if (m_include_timestamp)
         path += format("-%f", image.timestamp());
     return path;
 }
Example #5
void clipNear(RGBDImage& img) {
  Mat1f& depth = img.depthRef();
  Mat1b& mask = img.depthMaskRef();

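  // Locate the nearest valid depth value under the current depth mask.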
  double minVal, maxVal;
  Point minLoc, maxLoc;
  minMaxLoc(depth, &minVal, &maxVal, &minLoc, &maxLoc, mask);

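  // Keep only pixels within 1.0 depth unit of the nearest point; everything farther away is removed from the mask.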
  Mat lt;
  compare(depth, minVal + 1.0, lt, CMP_LT);
  bitwise_and(mask, lt, mask);
}
Example #6
int main(int argc, char **argv)
{
    // Parse command line options.
    arg_base::set_help_option("-h");
    arg_parse(argc, argv);

    // Set debug level to 1.
    ntk::ntk_debug_level = 1;

    // Set current directory to application directory.
    // This is to find Nite config in config/ directory.
    QApplication app (argc, argv);
    QDir::setCurrent(QApplication::applicationDirPath());

    // Declare the global OpenNI driver. Only one can be instantiated in a program.
    OpenniDriver ni_driver;

    // Declare the frame grabber.
    OpenniGrabber grabber(ni_driver, opt::kinect_id());

    // High resolution 1280x1024 RGB Image.
    if (opt::high_resolution())
        grabber.setHighRgbResolution(true);

    // Start the grabber.
    grabber.connectToDevice();
    grabber.start();

    // Holder for the current image.
    RGBDImage image;

    // Image post processor. Compute mappings when RGB resolution is 1280x1024.
    OpenniRGBDProcessor post_processor;

    // Wait for a new frame, get a local copy and postprocess it.
    grabber.waitForNextFrame();
    grabber.copyImageTo(image);
    post_processor.processImage(image);

    ntk_ensure(image.calibration(), "Uncalibrated rgbd image, cannot project to 3D!");

    // Generate a mesh out of the rgbd image.
    MeshGenerator generator;
    generator.setUseColor(true);
    generator.setMeshType(MeshGenerator::PointCloudMesh);
    generator.generate(image, *image.calibration()->depth_pose, *image.calibration()->rgb_pose);

    // Save the grabber mesh to a ply file.
    ntk_dbg(0) << "Saving mesh file to output.ply...";
    generator.mesh().saveToPlyFile("output.ply");
    grabber.stop();
}
Example #7
int main(int argc, char **argv)
{
    // Parse command line options.
    arg_base::set_help_option("-h");
    arg_parse(argc, argv);

    // Set debug level to 1.
    ntk::ntk_debug_level = 1;

    // Set current directory to application directory.
    // This is to find Nite config in config/ directory.
    QApplication app (argc, argv);
    QDir::setCurrent(QApplication::applicationDirPath());

    // Declare the grabber.
    SoftKineticIisuGrabber grabber;

    // Start the grabber.
    grabber.connectToDevice();
    grabber.start();

    // Holder for the current image.
    RGBDImage image;

    namedWindow("depth");
    namedWindow("color");

    char last_c = 0;
    while (last_c != 27)
    {
        // Wait for a new frame, get a local copy and postprocess it.
        grabber.waitForNextFrame();
        grabber.copyImageTo(image);

        // Prepare the depth view.
        cv::Mat1b debug_depth_img = normalize_toMat1b(image.rawDepth());

        // Prepare the color view with skeleton and handpoint.
        cv::Mat3b debug_color_img;
        image.rawRgb().copyTo(debug_color_img);

        imshow("depth", debug_depth_img);
        imshow("color", debug_color_img);
        last_c = (cv::waitKey(10) & 0xff);
    }
    grabber.stop();
}
Example #8
  void MeshGenerator :: generate(const RGBDImage& image,
                                 const Pose3D& depth_pose,
                                 const Pose3D& rgb_pose)
  {
    const Pose3D& real_depth_pose = depth_pose.isValid() ? depth_pose : *(image.calibration()->depth_pose);
    const Pose3D& real_rgb_pose = rgb_pose.isValid() ? rgb_pose : *(image.calibration()->rgb_pose);

    switch (m_mesh_type)
    {
    case TriangleMesh:
      return generateTriangleMesh(image, real_depth_pose, real_rgb_pose);
    case  PointCloudMesh:
      return generatePointCloudMesh(image, real_depth_pose, real_rgb_pose);
    case SurfelsMesh:
      return generateSurfelsMesh(image, real_depth_pose, real_rgb_pose);
    }
  }
Example #9
void RelativePoseEstimatorFromRgbFeatures::setTargetImage(const RGBDImage &image)
{
    ntk_ensure(image.calibration(), "Image must be calibrated.");
    m_target_image = &image;
    if (!m_target_pose.isValid())
    {
        m_target_pose = *m_target_image->calibration()->depth_pose;
    }
    m_target_features = toPtr(new FeatureSet);
}
Example #10
  void MeshGenerator :: generatePointCloudMesh(const RGBDImage& image,
                                               const Pose3D& depth_pose,
                                               const Pose3D& rgb_pose)
  {
    m_mesh.clear();
    m_mesh.vertices.reserve(image.depth().rows*image.depth().cols);
    m_mesh.colors.reserve(image.depth().rows*image.depth().cols);
    m_mesh.normals.reserve(image.depth().rows*image.depth().cols);

    const cv::Mat1f& depth_im = image.depth();
    const cv::Mat1b& mask_im = image.depthMask();
    cv::Mat3f voxels (depth_im.size());
    cv::Mat3f rgb_points (depth_im.size());

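    // Regular subsampling grid: mark one pixel every 1/m_resolution_factor rows and columns, then intersect it with the valid-depth mask.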
    cv::Mat1b subsample_mask(mask_im.size());
    subsample_mask = 0;
    for (float r = 0; r < subsample_mask.rows-1; r += 1.0/m_resolution_factor)
      for (float c = 0; c < subsample_mask.cols-1; c += 1.0/m_resolution_factor)
        subsample_mask(ntk::math::rnd(r),ntk::math::rnd(c)) = 1;
    subsample_mask = mask_im & subsample_mask;

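    // Unproject all masked depth pixels to 3D, and project them into the color image to fetch per-vertex colors.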
    depth_pose.unprojectFromImage(depth_im, subsample_mask, voxels);
    if (m_use_color && image.hasRgb())
      rgb_pose.projectToImage(voxels, subsample_mask, rgb_points);

    for (int r = 0; r < voxels.rows; ++r)
    {
      Vec3f* voxels_data = voxels.ptr<Vec3f>(r);
      const uchar* mask_data = subsample_mask.ptr<uchar>(r);
      for (int c = 0; c < voxels.cols; ++c)
      {
        if (!mask_data[c])
          continue;

        Vec3b color (0,0,0);
        if (m_use_color)
        {
          Point3f prgb = rgb_points(r,c);
          int i_y = ntk::math::rnd(prgb.y);
          int i_x = ntk::math::rnd(prgb.x);
          if (is_yx_in_range(image.rgb(), i_y, i_x))
          {
            Vec3b bgr = image.rgb()(i_y, i_x);
            color = Vec3b(bgr[2], bgr[1], bgr[0]);
          }
        }
        else
        {
          int g = 0;
          if (image.intensity().data)
            g = image.intensity()(r,c);
          else
            g = 255 * voxels_data[c][2] / 10.0;
          color = Vec3b(g,g,g);
        }

        m_mesh.vertices.push_back(voxels_data[c]);
        m_mesh.colors.push_back(color);
      }
    }
  }
Example #11
  void MeshGenerator :: generateTriangleMesh(const RGBDImage& image,
                                             const Pose3D& depth_pose,
                                             const Pose3D& rgb_pose)
  {
    const Mat1f& depth_im = image.depth();
    const Mat1b& mask_im = image.depthMask();
    m_mesh.clear();
    if (m_use_color)
      image.rgb().copyTo(m_mesh.texture);
    else if (image.intensity().data)
      toMat3b(normalize_toMat1b(image.intensity())).copyTo(m_mesh.texture);
    else
    {
      m_mesh.texture.create(depth_im.size());
      m_mesh.texture = Vec3b(255,255,255);
    }
    m_mesh.vertices.reserve(depth_im.cols*depth_im.rows);
    m_mesh.texcoords.reserve(depth_im.cols*depth_im.rows);
    m_mesh.colors.reserve(depth_im.cols*depth_im.rows);
    Mat1i vertice_map(depth_im.size());
    vertice_map = -1;
    for_all_rc(depth_im)
    {
      if (!mask_im(r,c))
        continue;
      double depth = depth_im(r,c);
      Point3f p3d = depth_pose.unprojectFromImage(Point3f(c,r,depth));
      Point3f p2d_rgb;
      Point2f texcoords;
      if (m_use_color)
      {
        p2d_rgb = rgb_pose.projectToImage(p3d);
        texcoords = Point2f(p2d_rgb.x/image.rgb().cols, p2d_rgb.y/image.rgb().rows);
      }
      else
      {
        p2d_rgb = Point3f(c,r,depth);
        texcoords = Point2f(p2d_rgb.x/image.intensity().cols, p2d_rgb.y/image.intensity().rows);
      }
      vertice_map(r,c) = m_mesh.vertices.size();
      m_mesh.vertices.push_back(p3d);
      // m_mesh.colors.push_back(bgr_to_rgb(im.rgb()(p2d_rgb.y, p2d_rgb.x)));
      m_mesh.texcoords.push_back(texcoords);
    }

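    // Triangulate neighbouring vertices, skipping edges across depth discontinuities larger than m_max_delta_depth.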
    for_all_rc(vertice_map)
    {
      if (vertice_map(r,c) < 0)
        continue;

      if ((c < vertice_map.cols - 1) &&  (r < vertice_map.rows - 1) &&
          (vertice_map(r+1,c)>=0) && (vertice_map(r,c+1) >= 0) &&
          (std::abs(depth_im(r,c) - depth_im(r+1, c)) < m_max_delta_depth) &&
          (std::abs(depth_im(r,c) - depth_im(r, c+1)) < m_max_delta_depth))
      {
        Face f;
        f.indices[2] = vertice_map(r,c);
        f.indices[1] = vertice_map(r,c+1);
        f.indices[0] = vertice_map(r+1,c);
        m_mesh.faces.push_back(f);
      }

      if ((c > 0) &&  (r < vertice_map.rows - 1) &&
          (vertice_map(r+1,c)>=0) && (vertice_map(r+1,c-1) >= 0) &&
          (std::abs(depth_im(r,c) - depth_im(r+1, c)) < m_max_delta_depth) &&
          (std::abs(depth_im(r,c) - depth_im(r+1, c-1)) < m_max_delta_depth))
      {
        Face f;
        f.indices[2] = vertice_map(r,c);
        f.indices[1] = vertice_map(r+1,c);
        f.indices[0] = vertice_map(r+1,c-1);
        m_mesh.faces.push_back(f);
      }
    }
    m_mesh.computeNormalsFromFaces();
  }
Example #12
  void MeshGenerator :: generateSurfelsMesh(const RGBDImage& image,
                                            const Pose3D& depth_pose,
                                            const Pose3D& rgb_pose)
  {
    double min_val = 0, max_val = 0;
    if (image.amplitude().data)
      minMaxLoc(image.amplitude(), &min_val, &max_val);

    m_mesh.clear();

    const cv::Mat1f& depth_im = image.depth();
    const cv::Mat1b& mask_im = image.depthMask();

    for_all_rc(depth_im)
    {
      int i_r = r;
      int i_c = c;
      if (!is_yx_in_range(depth_im, i_r, i_c))
        continue;

      if (!mask_im(r,c))
        continue;

      double depth = depth_im(i_r,i_c);
      cv::Point3f p = depth_pose.unprojectFromImage(Point2f(c,r), depth);

      Point3f normal = image.normal().data ? image.normal()(i_r, i_c) : Vec3f(0,0,1);

      Vec3b color (0,0,0);
      if (m_use_color)
      {
        cv::Point3f prgb = rgb_pose.projectToImage(p);
        int i_y = ntk::math::rnd(prgb.y);
        int i_x = ntk::math::rnd(prgb.x);
        if (is_yx_in_range(image.rgb(), i_y, i_x))
        {
          Vec3b bgr = image.rgb()(i_y, i_x);
          color = Vec3b(bgr[2], bgr[1], bgr[0]);
        }
      }
      else
      {
        int g = 0;
        if (image.amplitude().data)
          g = 255.0 * (image.amplitude()(i_r,i_c) - min_val) / (max_val-min_val);
        else
          g = 255 * depth / 10.0;
        color = Vec3b(g,g,g);
      }

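      // The surfel radius scales with depth over the focal length and grows for tilted surfaces (small normal.z).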
      Surfel s;
      s.color = color;
      s.confidence = 0;
      s.location = p;
      s.normal = normal;
      s.n_views = 1;
      double normal_z = std::max(normal.z, 0.5f);
      s.radius = m_resolution_factor * ntk::math::sqrt1_2 * depth
          / (depth_pose.focalX() * normal_z);
      m_mesh.addSurfel(s);
    }
  }
Example #13
bool SurfelsRGBDModeler :: addNewView(const RGBDImage& image_, Pose3D& depth_pose)
{
    ntk::TimeCount tc("SurfelsRGBDModeler::addNewView", 1);
    const float max_camera_normal_angle = ntk::deg_to_rad(90);

    RGBDImage image;
    image_.copyTo(image);
    if (!image_.normal().data)
    {
        OpenniRGBDProcessor processor;
        processor.computeNormalsPCL(image);
    }

    Pose3D rgb_pose = depth_pose;
    rgb_pose.toRightCamera(image.calibration()->rgb_intrinsics, image.calibration()->R, image.calibration()->T);

    Pose3D world_to_camera_normal_pose;
    world_to_camera_normal_pose.applyTransformBefore(cv::Vec3f(0,0,0), depth_pose.cvEulerRotation());
    Pose3D camera_to_world_normal_pose = world_to_camera_normal_pose;
    camera_to_world_normal_pose.invert();

    const Mat1f& depth_im = image.depth();
    Mat1b covered_pixels (depth_im.size());
    covered_pixels = 0;
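    // covered_pixels marks depth pixels already accounted for by an existing surfel.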

    std::list<Surfel> surfels_to_reinsert;

    // Surfel updating.
    for (SurfelMap::iterator next_it = m_surfels.begin(); next_it != m_surfels.end(); )
    {
        SurfelMap::iterator surfel_it = next_it;
        ++next_it;

        Surfel& surfel = surfel_it->second;
        if (!surfel.enabled())
            continue;

        Point3f surfel_2d = depth_pose.projectToImage(surfel.location);
        bool surfel_deleted = false;
        int r = ntk::math::rnd(surfel_2d.y);
        int c = ntk::math::rnd(surfel_2d.x);
        int d = ntk::math::rnd(surfel_2d.z);
        if (!is_yx_in_range(depth_im, r, c)
                || !image.depthMask()(r, c)
                || !image.isValidNormal(r,c))
            continue;

        const float update_max_dist = getCompatibilityDistance(depth_im(r,c));

        Vec3f camera_normal = image.normal()(r, c);
        normalize(camera_normal);

        Vec3f world_normal = camera_to_world_normal_pose.cameraTransform(camera_normal);
        normalize(world_normal);

        Vec3f eyev = camera_eye_vector(depth_pose, r, c);
        double camera_angle = acos(camera_normal.dot(-eyev));

        if (camera_angle > max_camera_normal_angle)
            continue;

        float normal_angle = acos(world_normal.dot(surfel.normal));
        // Surfels have different normals, maybe two different faces of the same object.
        if (normal_angle > (m_update_max_normal_angle*M_PI/180.0))
        {
            // Removal check. If a surfel has a different normal and is closer to the camera
            // than the new scan, remove it.
            if ((-surfel_2d.z) < depth_im(r,c) && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            continue;
        }

        // If existing surfel is far from new depth value:
        // - If existing one had a worst point of view, and was seen only once, remove it.
        // - Otherwise do not include the new one.
        if (std::abs(surfel_2d.z - depth_im(r,c)) > update_max_dist)
        {
            if (surfel.min_camera_angle > camera_angle && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            else
                covered_pixels(r,c) = 1;
            continue;
        }

        // Compatible surfel found.
        const float depth = depth_im(r,c) + m_global_depth_offset;

        Point3f p3d = depth_pose.unprojectFromImage(Point2f(c,r), depth);
        cv::Vec3b rgb_color = bgr_to_rgb(image.mappedRgb()(r, c));

        Surfel image_surfel;
        image_surfel.location = p3d;
        image_surfel.normal = world_normal;
        image_surfel.color = rgb_color;
        image_surfel.min_camera_angle = camera_angle;
        image_surfel.n_views = 1;
        image_surfel.radius = computeSurfelRadius(depth, camera_normal[2], depth_pose.meanFocal());
        mergeToLeftSurfel(surfel, image_surfel);

        covered_pixels(r,c) = 1;
        // needs to change the cell?
        Cell new_cell = worldToCell(surfel.location);
        if (new_cell != surfel_it->first)
        {
            surfels_to_reinsert.push_back(surfel);
            m_surfels.erase(surfel_it);
        }
    }

    foreach_const_it(it, surfels_to_reinsert, std::list<Surfel>)
    {
        Cell new_cell = worldToCell(it->location);
        m_surfels.insert(std::make_pair(new_cell, *it));
    }

    return true;
}
Example #14
LabelImage RandomForestImage::predict(const RGBDImage& image,
         cuv::ndarray<float, cuv::host_memory_space>* probabilities, const bool onGPU, bool useDepthImages) const {

    LabelImage prediction(image.getWidth(), image.getHeight());

    const LabelType numClasses = getNumClasses();

    if (treeData.size() != ensemble.size()) {
        throw std::runtime_error((boost::format("tree data size: %d, ensemble size: %d. histograms normalized?")
                % treeData.size() % ensemble.size()).str());
    }

    cuv::ndarray<float, cuv::host_memory_space> hostProbabilities(
            cuv::extents[numClasses][image.getHeight()][image.getWidth()],
            m_predictionAllocator);

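    // GPU path: accumulate per-tree class probabilities on the device, normalize them and take the per-pixel argmax.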
    if (onGPU) {
        cuv::ndarray<float, cuv::dev_memory_space> deviceProbabilities(
                cuv::extents[numClasses][image.getHeight()][image.getWidth()],
                m_predictionAllocator);
        cudaSafeCall(cudaMemset(deviceProbabilities.ptr(), 0, static_cast<size_t>(deviceProbabilities.size() * sizeof(float))));

        {
            utils::Profile profile("classifyImagesGPU");
            for (const boost::shared_ptr<const TreeNodes>& data : treeData) {
                classifyImage(treeData.size(), deviceProbabilities, image, numClasses, data, useDepthImages);
            }
        }

        normalizeProbabilities(deviceProbabilities);

        cuv::ndarray<LabelType, cuv::dev_memory_space> output(image.getHeight(), image.getWidth(),
                m_predictionAllocator);
        determineMaxProbabilities(deviceProbabilities, output);

        hostProbabilities = deviceProbabilities;
        cuv::ndarray<LabelType, cuv::host_memory_space> outputHost(image.getHeight(), image.getWidth(),
                m_predictionAllocator);

        outputHost = output;

        {
            utils::Profile profile("setLabels");
            for (int y = 0; y < image.getHeight(); ++y) {
                for (int x = 0; x < image.getWidth(); ++x) {
                    prediction.setLabel(x, y, static_cast<LabelType>(outputHost(y, x)));
                }
            }
        }
    } else {
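        // CPU path: sum the soft histograms of all trees for each pixel in parallel with TBB, then normalize and keep the best label.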
        utils::Profile profile("classifyImagesCPU");

        tbb::parallel_for(tbb::blocked_range<size_t>(0, image.getHeight()),
                [&](const tbb::blocked_range<size_t>& range) {
                    for(size_t y = range.begin(); y != range.end(); y++) {
                        for(int x=0; x < image.getWidth(); x++) {

                            for (LabelType label = 0; label < numClasses; label++) {
                                hostProbabilities(label, y, x) = 0.0f;
                            }

                            for (const auto& tree : ensemble) {
                                const auto& t = tree->getTree();
                                PixelInstance pixel(&image, 0, x, y);
                                const auto& hist = t->classifySoft(pixel);
                                assert(hist.size() == numClasses);
                                for(LabelType label = 0; label<hist.size(); label++) {
                                    hostProbabilities(label, y, x) += hist[label];
                                }
                            }

                            double sum = 0.0f;
                            for (LabelType label = 0; label < numClasses; label++) {
                                sum += hostProbabilities(label, y, x);
                            }
                            float bestProb = -1.0f;
                            for (LabelType label = 0; label < numClasses; label++) {
                                hostProbabilities(label, y, x) /= sum;
                                float prob = hostProbabilities(label, y, x);
                                if (prob > bestProb) {
                                    prediction.setLabel(x, y, label);
                                    bestProb = prob;
                                }
                            }
                        }
                    }
                });
    }

    if (probabilities) {
        *probabilities = hostProbabilities;
    }

    return prediction;
}
Example #15
void calibrate_kinect_depth(std::vector< DepthCalibrationPoint >& depth_values)
{
    std::vector< std::vector<Point3f> > pattern_points;
    calibrationPattern(pattern_points,
                       global::opt_pattern_width(),  global::opt_pattern_height(), global::opt_square_size(),
                       global::images_list.size());

    for (int i_image = 0; i_image < global::images_list.size(); ++i_image)
    {
        // Generate depth calibration points
        QString filename = global::images_list[i_image];
        QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));
        std::string full_filename = cur_image_dir.absoluteFilePath("raw/color.png").toStdString();
        RGBDImage image;
        OpenniRGBDProcessor processor;
        processor.setFilterFlag(RGBDProcessorFlags::ComputeMapping, true);
        image.loadFromDir(cur_image_dir.absolutePath().toStdString(), &global::calibration, &processor);

        imshow_normalized("mapped depth", image.mappedDepth());
        imshow("color", image.rgb());

        std::vector<Point2f> current_view_corners;
        calibrationCorners(full_filename, "corners",
                           global::opt_pattern_width(), global::opt_pattern_height(),
                           current_view_corners, image.rgb(), 1, global::pattern_type);

        if (current_view_corners.size() != (global::opt_pattern_width()*global::opt_pattern_height()))
        {
            ntk_dbg(1) << "Corners not detected in " << cur_image_dir.absolutePath().toStdString();
            continue;
        }

        // FIXME: why rvecs and tvecs from calibrateCamera seems to be nonsense ?
        // calling findExtrinsics another time to get good chessboard estimations.

        cv::Mat1f H;
        estimate_checkerboard_pose(pattern_points[0],
                                   current_view_corners,
                                   global::calibration.rgb_intrinsics,
                                   H);
        Pose3D pose;
        pose.setCameraParametersFromOpencv(global::calibration.rgb_intrinsics);
        pose.setCameraTransform(H);
        ntk_dbg_print(pose, 1);

        cv::Mat3b debug_img;
        image.rgb().copyTo(debug_img);
        foreach_idx(pattern_i, pattern_points[0])
        {
            Point3f p = pose.projectToImage(pattern_points[0][pattern_i]);
            ntk_dbg_print(p, 1);
            float kinect_raw = image.mappedDepth()(p.y, p.x);
            ntk_dbg_print(kinect_raw, 1);
            if (kinect_raw < 1e-5) continue;
            float err = kinect_raw-p.z;
            cv::putText(debug_img, format("%d", (int)(err*1000)), Point(p.x, p.y), CV_FONT_NORMAL, 0.4, Scalar(255,0,0));
            ntk_dbg_print(pattern_points[0][pattern_i], 1);
            ntk_dbg_print(p, 1);
            ntk_dbg_print(kinect_raw, 1);
            depth_values.push_back(DepthCalibrationPoint(kinect_raw, p.z));
        }
        imshow("errors", debug_img);
        cv::waitKey(0);
    }
}
Example #16
void RelativePoseEstimatorFromRgbFeatures::setSourceImage(const RGBDImage &image, ntk::Ptr<FeatureSet> features)
{
    ntk_ensure(image.calibration(), "Image must be calibrated.");
    super::setSourceImage(image);
    m_source_features = features;
}
Example #17
bool RelativePoseEstimatorICP :: estimateNewPose(const RGBDImage& image)
{
    if (m_ref_cloud.points.size() < 1)
    {
        ntk_dbg(1) << "Reference cloud was empty";
        return false;
    }

    PointCloud<PointXYZ>::Ptr target = m_ref_cloud.makeShared();
    PointCloud<PointXYZ>::Ptr source (new PointCloud<PointXYZ>());
    rgbdImageToPointCloud(*source, image);

    PointCloud<PointXYZ>::Ptr filtered_source (new PointCloud<PointXYZ>());
    PointCloud<PointXYZ>::Ptr filtered_target (new PointCloud<PointXYZ>());

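    // Downsample both clouds with a voxel grid to speed up the ICP alignment.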
    pcl::VoxelGrid<pcl::PointXYZ> grid;
    grid.setLeafSize (m_voxel_leaf_size, m_voxel_leaf_size, m_voxel_leaf_size);

    grid.setInputCloud(source);
    grid.filter(*filtered_source);

    grid.setInputCloud(target);
    grid.filter(*filtered_target);

    PointCloud<PointXYZ> cloud_reg;
    IterativeClosestPoint<PointXYZ, PointXYZ> reg;
    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-5);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setInputCloud (filtered_source);
    reg.setInputTarget (filtered_target);
    reg.align (cloud_reg);

    if (!reg.hasConverged())
    {
      ntk_dbg(1) << "ICP did not converge, ignoring.";
      return false;
    }

    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    //toOpencv(t,T);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    Pose3D icp_pose;
    icp_pose.setCameraTransform(T);
    ntk_dbg_print(icp_pose.cvTranslation(), 1);

    // Pose3D stores the transformation from 3D space to image.
    // So we have to invert everything so that unprojecting from
    // image to 3D gives the right transformation.
    // H2' = ICP * H1'
    m_current_pose.resetCameraTransform();
    m_current_pose = *image.calibration()->depth_pose;
    m_current_pose.invert();
    m_current_pose.applyTransformAfter(icp_pose);
    m_current_pose.invert();
    return true;
}
Example #18
  void RGBDFrameRecorder :: writeFrame(const RGBDImage& image, const std::string& frame_dir)
  {
      std::string raw_frame_dir = format("%s/raw", frame_dir.c_str());

      QDir dir (frame_dir.c_str());
      dir.mkpath("raw");

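      // Raw sensor dumps go into the raw/ subdirectory; post-processed images are written next to it.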
      std::string filename;

      if (m_save_rgb_pose && image.calibration())
      {
        filename = cv::format("%s/rgb_pose.avs", frame_dir.c_str());
        image.rgbPose().saveToAvsFile(filename.c_str());
      }

      if (!m_only_raw)
      {
        filename = cv::format("%s/color.png", frame_dir.c_str());
        imwrite(filename, image.rgb());
      }

      if (m_save_pcl_point_cloud)
      {
        filename = cv::format("%s/cloud.pcd", frame_dir.c_str());
#ifdef NESTK_USE_PCL
        pcl::PointCloud<pcl::PointXYZ> cloud;
        rgbdImageToPointCloud(cloud, image);
        pcl::io::savePCDFileASCII(filename.c_str(), cloud);
#endif
      }

      if (m_use_compressed_format)
          filename = cv::format("%s/raw/color.png", frame_dir.c_str());
      else
          filename = cv::format("%s/raw/color.bmp", frame_dir.c_str());
      imwrite(filename, image.rawRgb());

      if (!m_only_raw && image.mappedDepth().data)
      {
        filename = cv::format("%s/mapped_depth.png", frame_dir.c_str());
        imwrite_normalized(filename, image.mappedDepth());

        filename = cv::format("%s/mapped_color.png", frame_dir.c_str());
        imwrite(filename, image.mappedRgb());

        filename = cv::format("%s/depth.yml", frame_dir.c_str());
        imwrite_yml(filename, image.mappedDepth());
      }

      if (!m_only_raw)
      {
        filename = cv::format("%s/raw/depth.png", frame_dir.c_str());
        if (image.rawDepth().data)
          imwrite_normalized(filename.c_str(), image.rawDepth());

        filename = cv::format("%s/depth.png", frame_dir.c_str());
        if (image.depth().data)
          imwrite_normalized(filename.c_str(), image.depth());

        filename = cv::format("%s/intensity.png", frame_dir.c_str());
        if (image.intensity().data)
            imwrite_normalized(filename.c_str(), image.intensity());
      }

      if (image.rawDepth().data)
      {
        if (m_use_binary_raw)
        {
          filename = cv::format("%s/raw/depth.raw", frame_dir.c_str());
          imwrite_Mat1f_raw(filename.c_str(), image.rawDepth());
        }
        else
        {
          filename = cv::format("%s/raw/depth.yml", frame_dir.c_str());
          imwrite_yml(filename.c_str(), image.rawDepth());
        }
      }

      if (m_save_intensity && image.rawIntensity().data)
      {
        if (m_use_binary_raw)
        {
          filename = cv::format("%s/raw/intensity.raw", frame_dir.c_str());
          imwrite_Mat1f_raw(filename.c_str(), image.rawIntensity());
        }
        else
        {
          filename = cv::format("%s/raw/intensity.yml", frame_dir.c_str());
          imwrite_yml(filename.c_str(), image.rawIntensity());
        }
      }

      if (!m_only_raw)
      {
        filename = cv::format("%s/raw/amplitude.png", frame_dir.c_str());
        if (image.rawAmplitude().data)
          imwrite_normalized(filename.c_str(), image.rawAmplitude());

        filename = cv::format("%s/amplitude.png", frame_dir.c_str());
        if (image.amplitude().data)
          imwrite_normalized(filename.c_str(), image.amplitude());
      }

      if (image.rawAmplitude().data)
      {
        if (m_use_binary_raw)
        {
          filename = cv::format("%s/raw/amplitude.raw", frame_dir.c_str());
          imwrite_Mat1f_raw(filename.c_str(), image.rawAmplitude());
        }
        else
        {
          filename = cv::format("%s/raw/amplitude.yml", frame_dir.c_str());
          imwrite_yml(filename.c_str(), image.rawAmplitude());
        }
      }
  }
Example #19
  void RGBDFrameRecorder :: saveCurrentFrame(const RGBDImage& image)
  {
    std::string frame_dir = format("%s/view%04d", m_dir.absolutePath().toStdString().c_str(), m_frame_index);
    std::string raw_frame_dir = format("%s/raw", frame_dir.c_str());

    QDir dir (frame_dir.c_str());
    dir.mkpath("raw");

    std::string filename;

    if (!m_only_raw)
    {
      filename = cv::format("%s/color.png", frame_dir.c_str());
      imwrite(filename, image.rgb());
    }

    filename = cv::format("%s/raw/color.png", frame_dir.c_str());
    imwrite(filename, image.rawRgb());

    if (!m_only_raw && image.mappedDepth().data)
    {
      filename = cv::format("%s/mapped_depth.png", frame_dir.c_str());
      imwrite_normalized(filename, image.mappedDepth());

      filename = cv::format("%s/mapped_color.png", frame_dir.c_str());
      imwrite(filename, image.mappedRgb());

      filename = cv::format("%s/depth.yml", frame_dir.c_str());
      imwrite_yml(filename, image.mappedDepth());
    }

    if (!m_only_raw)
    {
      filename = cv::format("%s/raw/depth.png", frame_dir.c_str());
      if (image.rawDepth().data)
        imwrite_normalized(filename.c_str(), image.rawDepth());

      filename = cv::format("%s/depth.png", frame_dir.c_str());
      if (image.depth().data)
        imwrite_normalized(filename.c_str(), image.depth());

      filename = cv::format("%s/intensity.png", frame_dir.c_str());
      if (image.intensity().data)
        imwrite_normalized(filename.c_str(), image.intensity());
    }

    if (image.rawDepth().data)
    {
      if (m_use_binary_raw)
      {
        filename = cv::format("%s/raw/depth.raw", frame_dir.c_str());
        imwrite_Mat1f_raw(filename.c_str(), image.rawDepth());
      }
      else
      {
        filename = cv::format("%s/raw/depth.yml", frame_dir.c_str());
        imwrite_yml(filename.c_str(), image.rawDepth());
      }
    }

    if (image.rawIntensity().data)
    {
      if (m_use_binary_raw)
      {
        filename = cv::format("%s/raw/intensity.raw", frame_dir.c_str());
        imwrite_Mat1f_raw(filename.c_str(), image.rawIntensity());
      }
      else
      {
        filename = cv::format("%s/raw/intensity.yml", frame_dir.c_str());
        imwrite_yml(filename.c_str(), image.rawIntensity());
      }
    }

    if (!m_only_raw)
    {
      filename = cv::format("%s/raw/amplitude.png", frame_dir.c_str());
      if (image.rawAmplitude().data)
        imwrite_normalized(filename.c_str(), image.rawAmplitude());

      filename = cv::format("%s/amplitude.png", frame_dir.c_str());
      if (image.amplitude().data)
        imwrite_normalized(filename.c_str(), image.amplitude());
    }

    if (image.rawAmplitude().data)
    {
      if (m_use_binary_raw)
      {
        filename = cv::format("%s/raw/amplitude.raw", frame_dir.c_str());
        imwrite_Mat1f_raw(filename.c_str(), image.rawAmplitude());
      }
      else
      {
        filename = cv::format("%s/raw/amplitude.yml", frame_dir.c_str());
        imwrite_yml(filename.c_str(), image.rawAmplitude());
      }
    }

    ++m_frame_index;
  }
Example #20
void LabicCV::display() {
	Mat depthMat(Size(width, height), CV_8UC3, Scalar(0));
    Mat left(cameras, Rect(0, 0, width, height));
    Mat right(cameras, Rect(width, 0, width, height));
    RGBDImage frame;

    uint32_t timestampPrevious = 0;
    long long unsigned int clicks = 0;
    double fps = 0;
    double seconds = 0;

    startCapture = !captureHold;

    hrclock::time_point first, start, end, lastCap;

    dinfo << "[LabicCV] Display started" << endl;

    first = lastCap = start = hrclock::now();

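    // Grab frames until a stop is requested, drawing color and depth side by side and keeping an FPS estimate.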
    do {
        kinect->grabRGBDImage(frame);
        
        // Skip redrawing if there was no change
        if (frame.timestamp() == timestampPrevious) continue;

        generateDepthImage(frame.depth(), depthMat);
        
        frame.rgb().copyTo(left);
        depthMat.copyTo(right);
        
        timestampPrevious = frame.timestamp();

        // Calculate FPS
        end = hrclock::now();
        seconds = diffTime(end, start);
        if (seconds > 0) fps = clicks++ / seconds;

        // If automatic mode is on and can start capture
        if (captureInterval > 0 && startCapture) {
            seconds = diffTimeMs(end, lastCap);

            if (seconds > captureInterval || !savedFrames) {
                //ddebug << "difftime for capture in ms: " << seconds << endl;
                lastCap = hrclock::now();
                if (!savedFrames) first = lastCap;
                saveFrame();
                savedFrames++;
            }
        }

        ostringstream fps_str;
        fps_str << "OpenCV FPS: " << fps;
        putText(cameras, fps_str.str(), Point(20,30), CV_FONT_HERSHEY_PLAIN, 1.0f, Scalar::all(0));

        processedFrames++;
    } while (!*stop);

    windowClosed = true;

    totalTime = diffTime(lastCap, first);
}
Example #21
int main(int argc, char **argv)
{
    // Parse command line options.
    arg_base::set_help_option("-h");
    arg_parse(argc, argv);

    // Set debug level to 1.
    ntk::ntk_debug_level = 1;

    // Set current directory to application directory.
    // This is to find Nite config in config/ directory.
    QApplication app (argc, argv);
    QDir::setCurrent(QApplication::applicationDirPath());

    // Declare the global OpenNI driver. Only one can be instantiated in a program.
    OpenniDriver ni_driver;

    // Declare the frame grabber.
    OpenniGrabber grabber(ni_driver, opt::kinect_id());

    // High resolution 1280x1024 RGB Image.
    if (opt::high_resolution())
        grabber.setHighRgbResolution(true);

    // Use mirrored images.
    grabber.setMirrored(true);

    // Start the grabber.
    grabber.connectToDevice();
    grabber.start();

    // Holder for the current image.
    RGBDImage image;

    // Image post processor. Compute mappings when RGB resolution is 1280x1024.
    OpenniRGBDProcessor post_processor;

    namedWindow("depth");
    namedWindow("color");
    namedWindow("users");

    char last_c = 0;
    while (last_c != 27)
    {
        // Wait for a new frame, get a local copy and postprocess it.
        grabber.waitForNextFrame();
        grabber.copyImageTo(image);
        post_processor.processImage(image);

        // Prepare the depth view, with skeleton and handpoint.
        cv::Mat1b debug_depth_img = normalize_toMat1b(image.depth());
        if (image.skeleton())
            image.skeleton()->drawOnImage(debug_depth_img);

        // Prepare the color view (mapped to depth frame), with skeleton and handpoint.
        cv::Mat3b debug_color_img;
        image.mappedRgb().copyTo(debug_color_img);
        if (image.skeleton())
            image.skeleton()->drawOnImage(debug_color_img);

        // Prepare the user mask view as colors.
        cv::Mat3b debug_users;
        image.fillRgbFromUserLabels(debug_users);

        imshow("depth", debug_depth_img);
        imshow("color", debug_color_img);
        imshow("users", debug_users);
        last_c = (cv::waitKey(10) & 0xff);
    }
}
Example #22
int main(int argc, char **argv)
{
  // Parse command line options.
  arg_base::set_help_option("-h");
  arg_parse(argc, argv);

  // Set debug level to 1.
  ntk::ntk_debug_level = 1;

  // Set current directory to application directory.
  // This is to find Nite config in config/ directory.
  // FIXME: this is disabled for OpenCV QT compatibility.
  // QApplication app (argc, argv);
  // QDir::setCurrent(QApplication::applicationDirPath());

  // Prepare body event listeners.
  MyBodyListener body_event_listener;
  BodyEventDetector detector;
  detector.addListener(&body_event_listener);

  // Declare the global OpenNI driver. Only one can be instantiated in a program.
  OpenniDriver ni_driver;

  // Declare the frame grabber.
  OpenniGrabber grabber(ni_driver, opt::kinect_id());
  grabber.setBodyEventDetector(&detector);

  // High resolution 1280x1024 RGB Image.
  if (opt::high_resolution())
    grabber.setHighRgbResolution(true);

  // Start the grabber.
  grabber.connectToDevice();
  grabber.start();

  // Holder for the current image.
  RGBDImage image;

  // Image post processor. Compute mappings when RGB resolution is 1280x1024.
  OpenniRGBDProcessor post_processor;

  namedWindow("depth");
  namedWindow("color");
  namedWindow("users");

  while (true)
  {
    // Wait for a new frame, get a local copy and postprocess it.
    grabber.waitForNextFrame();
    grabber.copyImageTo(image);
    post_processor.processImage(image);

    // Get the last hand point position.
    cv::Point3f handpoint = body_event_listener.getLastHandPosInImage();
    ntk_dbg_print(handpoint, 1);

    // Prepare the depth view, with skeleton and handpoint.
    cv::Mat1b debug_depth_img = normalize_toMat1b(image.depth());
    if (image.skeleton())
      image.skeleton()->drawOnImage(debug_depth_img);
    circle(debug_depth_img, Point(handpoint.x, handpoint.y), 5, Scalar(0, 255, 255));

    // Prepare the color view, with skeleton and handpoint.
    cv::Mat3b debug_color_img;
    image.mappedRgb().copyTo(debug_color_img);
    if (image.skeleton())
      image.skeleton()->drawOnImage(debug_color_img);
    circle(debug_color_img, Point(handpoint.x, handpoint.y), 5, Scalar(0, 255, 255));

    // Prepare the user mask view as colors.
    cv::Mat3b debug_users;
    image.fillRgbFromUserLabels(debug_users);

    imshow("depth", debug_depth_img);
    imshow("color", debug_color_img);
    imshow("users", debug_users);
    cv::waitKey(10);
  }

  // return app.exec();
}
Example #23
int main(int argc, char **argv)
{
    // Parse command line options.
    arg_base::set_help_option("-h");
    arg_parse(argc, argv);

    // Set debug level to 1.
    ntk::ntk_debug_level = 1;

    // Set current directory to application directory.
    // This is to find Nite config in config/ directory.
    QApplication app (argc, argv);
    QDir::setCurrent(QApplication::applicationDirPath());

    // Declare the global OpenNI driver. Only one can be instantiated in a program.
    OpenniDriver ni_driver;

    // Declare the frame grabber.
    OpenniGrabber grabber(ni_driver, opt::kinect_id());

    // High resolution 1280x1024 RGB Image.
    if (opt::high_resolution())
        grabber.setHighRgbResolution(true);

    // Start the grabber.
    grabber.connectToDevice();
    grabber.start();

    // Holder for the current image.
    RGBDImage image;

    // Image post processor. Compute mappings when RGB resolution is 1280x1024.
    OpenniRGBDProcessor post_processor;

    // Wait for a new frame, get a local copy and postprocess it.
    grabber.waitForNextFrame();
    grabber.copyImageTo(image);
    post_processor.processImage(image);

    ntk_ensure(image.calibration(), "Uncalibrated rgbd image, cannot project to 3D!");

    // Initialize the object modeler.
    PointCloud<PointXYZ> cloud;
    rgbdImageToPointCloud(cloud, image);

    TableObjectDetector<PointXYZ> detector;
    detector.setObjectVoxelSize(0.003); // 3 mm voxels.
    bool ok = detector.detect(cloud);
    ntk_throw_exception_if(!ok, "No cluster detected on a table plane!");

    for (int cluster_id = 0; cluster_id < detector.objectClusters().size(); ++cluster_id)
    {
        TableObjectRGBDModeler modeler;
        modeler.feedFromTableObjectDetector(detector, cluster_id);
        Pose3D pose = *image.calibration()->depth_pose;
        modeler.addNewView(image, pose);
        modeler.computeMesh();
        modeler.currentMesh().saveToPlyFile(cv::format("object%d.ply", cluster_id).c_str());
    }

    grabber.stop();
    grabber.disconnectFromDevice();
}
Example #24
  void PlaneEstimator :: estimate(RGBDImage& image, Mat1b& plane_points)
  {

      // Passing from 3D to the optimizer

      const cv::Mat3f& normal_image = image.normal();
      const cv::Mat1f& distance_image = image.depth();
      cv::Mat1b& mask_image = image.depthMaskRef();
      cv::Mat1b objfilter;
      mask_image.copyTo(objfilter);
      plane_points.create(image.normal().size());
      plane_points = 0;

    if (!image.normal().data)
    {
      ntk_dbg(0) << "WARNING: you must active the normal filter to get plane estimation!";
      return;
    }

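    // Search bounds for the plane parameters explored by the differential evolution solver.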
    double min[dim];
    double max[dim];
    int i;

    for (i=0;i<dim;i++)
    {
      max[i] = 1000.0;
      min[i] = -1000.0;
    }
    m_solver.Setup(min,max,DifferentialEvolutionSolver::stBest1Exp,0.8,0.75);


   // Early estimation of plane points projecting the normal values

    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
      if (objfilter.data && objfilter(r,c))
      {
        cv::Vec3f normal = normal_image(r,c);
        double prod = normal.dot(m_ref_plane);
        if (prod > 0.95)
          plane_points(r,c) = 255;
        else
          plane_points(r,c) = 0;
      }
    }

    // cleaning of the surface very first estimation
    dilate(plane_points,plane_points,cv::Mat());
    erode(plane_points,plane_points,cv::Mat());
    //imwrite("plane-initial.png",plane_points);

    std::vector<Point3f>& g = m_solver.planePointsRef();

    g.clear();
    
    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
      if (plane_points(r,c))
      {
        // possible table member!
        Point3f p3d = image.calibration()->depth_pose->unprojectFromImage(Point2f(c,r), distance_image(r,c));
        g.push_back(p3d);
      }
    }
    
    // Calculating...
    m_solver.Solve(max_generations);

    double *solution = m_solver.Solution();

    // Plane normalizer
    float suma = solution[0] + solution[1] + solution[2] + solution[3] ;
    for (int i = 0; i < 4; i++)
      solution[i] = solution[i]/ suma;

    ntk::Plane plano (solution[0], solution[1], solution[2], solution[3]);
    //Update RGBD object
    m_plane.set(solution[0], solution[1], solution[2], solution[3]);


    // Final estimation of plane points projecting the normal values

     cv::Vec3f diffevolnormal(solution[0], solution[1], solution[2]);

     for (int r = 1; r < plane_points.rows-1; ++r)
     for (int c = 1; c < plane_points.cols-1; ++c)
     {
       if (objfilter.data && objfilter(r,c))
       {
         cv::Vec3f normal = normal_image(r,c);
         double prod = normal.dot(diffevolnormal);

         if (prod > 0.5)
           plane_points(r,c) = 255;
         else
           plane_points(r,c) = 0;
       }
     }

     // Cleaning of the DE plane-pixels solution
     dilate(plane_points,plane_points,cv::Mat());
     erode(plane_points,plane_points,cv::Mat());
    // imwrite("plane-DE.png",plane_points);


  }
Example #25
LabelImage RandomForestImage::improveHistograms(const RGBDImage& image, const LabelImage& labelImage, const bool onGPU, bool useDepthImages) const {

    LabelImage prediction(image.getWidth(), image.getHeight());

    const LabelType numClasses = getNumClasses();

    if (treeData.size() != ensemble.size()) {
        throw std::runtime_error((boost::format("tree data size: %d, ensemble size: %d. histograms normalized?")
                % treeData.size() % ensemble.size()).str());
    }

    cuv::ndarray<float, cuv::host_memory_space> hostProbabilities(
            cuv::extents[numClasses][image.getHeight()][image.getWidth()],
            m_predictionAllocator);

    // These offsets should have been used instead of traversing to the leaves again:
    // cuv::ndarray<unsigned int, cuv::dev_memory_space> nodeOffsets(
    //         cuv::extents[image.getHeight()][image.getWidth()],
    //         m_predictionAllocator);

    if (onGPU) {
        cuv::ndarray<float, cuv::dev_memory_space> deviceProbabilities(
                cuv::extents[numClasses][image.getHeight()][image.getWidth()],
                m_predictionAllocator);
        cudaSafeCall(cudaMemset(deviceProbabilities.ptr(), 0, static_cast<size_t>(deviceProbabilities.size() * sizeof(float))));

        {
            utils::Profile profile("classifyImagesGPU");
            for (const boost::shared_ptr<const TreeNodes>& data : treeData) {
                classifyImage(treeData.size(), deviceProbabilities, image, numClasses, data, useDepthImages);
                bool found_tree = false;
                // TODO: this should be changed to a parallel for, with a lock added.
                for (size_t treeNr = 0; treeNr < ensemble.size(); treeNr++) {
                    if (data->getTreeId() == ensemble[treeNr]->getId()) {
                        found_tree = true;
                        const boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> >& tree = ensemble[treeNr]->getTree();
                        // This could be collected once before classifying the images, since it does not change:
                        // std::vector<size_t> leafSet;
                        // tree->collectLeafNodes(leafSet);
                        for (int y = 0; y < image.getHeight(); y++)
                            for (int x = 0; x < image.getWidth(); x++) {
                                LabelType label = labelImage.getLabel(x, y);
                                if (!shouldIgnoreLabel(label)) {
                                    PixelInstance pixel(&image, label, x, y);
                                    // TODO: when classifying the image, the node offsets should be returned and used
                                    // directly instead of traversing to the leaves again. As a test, the node offset
                                    // can be compared with the one returned by travertoleaf.
                                    tree->setAllPixelsHistogram(pixel);
                                }
                            }
                    }
                    if (found_tree)
                        break;
                }
            }
        }

    }
    // TODO: the corresponding CPU code path should be added here as well.
    return prediction;
}