Example #1
File: tracker.cpp Project: mgsloan/kiimote
void trackHead(ntk::RGBDImage& img, Point3f& hpos) {
  Mat1f& depth = img.depthRef();
  Mat1b& mask = img.depthMaskRef();
  const RGBDCalibration* calib = img.calibration();
  if (calib) {
    const Pose3D* rgb_pose = calib->rgb_pose;
    const Pose3D* depth_pose = calib->depth_pose;
    for (int y = 0; y < depth.rows; y+=10)
    for (int x = 0; x < depth.cols; x+=10) {
      if (mask.at<bool>(y, x)) {
        // probably a more opencv-ey way to do this, but eh.
        y = min(depth.rows - 1, y + 20); // step down 20 px, clamped to the last valid row
        int minx = x - 9, maxx = x + 40;
        for(int c = max(x - 9, 0); c < x + 40 && c < depth.cols; c++) {
          if (!mask.at<bool>(y, c)) {
            if (c < x) minx = c;
            if (c > x) { maxx = c; break; }
          }
        }
        x = (minx + maxx) / 2;
        float val = depth.at<float>(y, x);
        Point2f screenpos(x, y);
        cv::Point3f p = depth_pose->unprojectFromImage(screenpos, val);
        hpos.x = p.x; hpos.y = p.y; hpos.z = p.z - 0.05;
        return;
      }
    }
  }
}
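A minimal, hypothetical call site for trackHead, assuming the same ntk/OpenCV declarations as tracker.cpp (not shown here); the sentinel check works because trackHead returns without writing hpos when no calibration or masked depth pixel is found:

#include <cstdio>

// Hypothetical usage sketch; reportHead is an illustrative name, not
// part of the project. Start from a sentinel so a failed track shows.
void reportHead(ntk::RGBDImage& img)
{
    cv::Point3f head(0.f, 0.f, 0.f);
    trackHead(img, head);
    if (head.z != 0.f)
        std::printf("head at %.2f %.2f %.2f\n", head.x, head.y, head.z);
}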
Example #2
void PeopleTracker::estimateBackground(const ntk::RGBDImage& image)
{
    image.depth().copyTo(m_background_depth);
    image.copyTo(m_foreground_image);

    estimateGroundPlaneFromUserInput(image);
    estimateInitialWorkingZone(image);
    computeVirtualTopViewPose();
    computeVirtualTopView();

    estimateWorkingZoneFromUserInput(image);
    computeVirtualTopViewPose();
}
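Downstream, a background depth image captured this way is typically diffed against each new frame to get a foreground mask. A minimal sketch of that step; the 5 cm threshold, the zero-means-invalid-depth convention, and the function name are assumptions for illustration, not taken from this project:

#include <opencv2/core/core.hpp>

// Sketch: flag pixels at least 5 cm closer than the stored background.
// Threshold and zero-as-invalid are assumptions, not project constants.
cv::Mat1b computeForegroundMask(const cv::Mat1f& depth, const cv::Mat1f& background)
{
    cv::Mat1b fg(depth.size(), (uchar)0);
    for (int r = 0; r < depth.rows; ++r)
        for (int c = 0; c < depth.cols; ++c)
        {
            float d = depth(r, c), b = background(r, c);
            if (d > 0.f && b > 0.f && (b - d) > 0.05f)
                fg(r, c) = 255;
        }
    return fg;
}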
Example #3
void RawImagesWindow :: update(const ntk::RGBDImage& image)
{
    if (image.hasRgb() && ui->colorView->isVisible())
        ui->colorView->setImage(image.rgb());

    if (ui->depthView->isVisible())
    {
        double min_dist = m_controller.rgbdProcessor().minDepth();
        double max_dist = m_controller.rgbdProcessor().maxDepth();
        cv::Mat1f masked_distance;
        image.depth().copyTo(masked_distance);
        apply_mask(masked_distance, image.depthMask());
        cv::Mat3b depth_as_color;
        compute_color_encoded_depth(masked_distance, depth_as_color, &min_dist, &max_dist);
        ui->depthView->setImage(depth_as_color);
        // ui->depthView->setImage(image.depth(), &min_dist, &max_dist);
    }
    // ui->depthView->setImageAsColor(image.depth(), &min_dist, &max_dist);
    // ui->depthView->setImage(image.depth(), &min_dist, &max_dist);
    if (image.amplitude().data && ui->amplitudeView->isVisible())
        ui->amplitudeView->setImage(image.amplitude());
    if (image.intensity().data && ui->intensityView->isVisible())
        ui->intensityView->setImage(image.intensity());

    int x,y;
    ui->depthView->getLastMousePos(x,y);
    m_controller.on_depth_mouse_moved(x,y);
}
Example #4
void ModelAcquisitionController :: newFrame(const ntk::RGBDImage& image)
{
  if (m_paused)
    return;

  if (!m_pose_estimator)
  {
    ntk_dbg(1) << "Warning: no pose estimator";
    return;
  }

  if (!m_new_frame_run.isFinished())
  {
    return;
  }
  else
  {
    if (m_new_frame_run.isStarted() && m_new_frame_run.resultCount() > 0)
    {
      if (m_new_frame_run.result())
      {        
        m_controller.modelAcquisitionWindow()->ui->mesh_view->addMesh(m_modeler.currentMesh(), Pose3D(), MeshViewer::FLAT);
        m_controller.modelAcquisitionWindow()->ui->mesh_view->swapScene();
      }
    }
    image.copyTo(m_current_image);
    m_new_frame_run = QtConcurrent::run(this, &ModelAcquisitionController::newFrameThread, &m_current_image);
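    // waitForFinished() below blocks until the worker thread completes,
    // so this handler is effectively synchronous and the isFinished()
    // guard above never actually skips a frame.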
    m_new_frame_run.waitForFinished();
  }
}
Example #5
void DetectorWindow :: update(const ntk::RGBDImage& image)
{
  cv::Mat1b mask; std::list<cv::Rect> rects;
  m_controller.objectDetector()->detect(image.depth(), mask, rects);
  ui->maskImage->setImage(mask);
  ui->maskImage->setRects(rects);
}
Example #6
void RawImagesWindow :: update(const ntk::RGBDImage& image)
{
  if (ui->colorView->isVisible())
    ui->colorView->setImage(image.rgb());

  if (ui->depthView->isVisible())
  {
    double min_dist = m_controller.rgbdProcessor().minDepth();
    double max_dist = m_controller.rgbdProcessor().maxDepth();
    cv::Mat1f masked_distance; image.depth().copyTo(masked_distance);
    apply_mask(masked_distance, image.depthMask());
    cv::Mat3b depth_as_color;
    compute_color_encoded_depth(masked_distance, depth_as_color, &min_dist, &max_dist);
    if (image.skeleton())
      image.skeleton()->drawOnImage(depth_as_color);
    ui->depthView->setImage(depth_as_color);
  }
  // ui->depthView->setImageAsColor(image.depth(), &min_dist, &max_dist);
  // ui->depthView->setImage(image.depth(), &min_dist, &max_dist);
  if (image.userLabels().data && ui->intensityView->isVisible())
  {
    cv::Mat3b user_labels;
    image.fillRgbFromUserLabels(user_labels);
    ui->intensityView->setImage(user_labels);
  }

  int x,y;
  ui->depthView->getLastMousePos(x,y);
  m_controller.on_depth_mouse_moved(x,y);
}
Example #7
void PeopleTracker::estimateGroundPlaneFromUserInput(const ntk::RGBDImage& image)
{
    // Estimate the ground plane equation by asking the user for 3 points.
    TrackerGroundMouseData ground_mouse_data;
    ground_mouse_data.window_name = "Select 3 points to estimate the ground plane (right click)";
    namedWindow(ground_mouse_data.window_name,
                CV_WINDOW_NORMAL|CV_WINDOW_KEEPRATIO|CV_GUI_EXPANDED);
    ground_mouse_data.image = toMat3b(normalize_toMat1b(image.depth()));
    imshow(ground_mouse_data.window_name, ground_mouse_data.image);
    setMouseCallback(ground_mouse_data.window_name, on_tracker_ground_mouse, &ground_mouse_data);
    while (ground_mouse_data.points.size() != 3)
    {
        waitKey(30);
        QApplication::processEvents();
    }
    ntk_dbg(0) << "I am here!";

    Point3f p0, p1, p2;
    p0 = image.calibration()->depth_pose->unprojectFromImage(ground_mouse_data.points[0],
                                                             image.depth()(ground_mouse_data.points[0]));
    p1 = image.calibration()->depth_pose->unprojectFromImage(ground_mouse_data.points[1],
                                                             image.depth()(ground_mouse_data.points[1]));
    p2 = image.calibration()->depth_pose->unprojectFromImage(ground_mouse_data.points[2],
                                                             image.depth()(ground_mouse_data.points[2]));
    destroyWindow(ground_mouse_data.window_name);
    Vec3f normal = -(p1-p0).cross(p2-p0);
    normal = cv::normalize(normal); // cv::normalize returns the unit vector; it does not modify its argument
    if (normal[2] < 0)
        normal = -normal;
    m_ground_plane = Plane(normal, p0);
}
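For reference, the same three-point plane construction written against plain OpenCV types; the function name and the Vec4f packing of the plane as (a, b, c, d) with ax + by + cz + d = 0 are assumptions about the Plane class, which this project defines elsewhere:

#include <cmath>
#include <opencv2/core/core.hpp>

// Plane through p0 with normal n = (p1 - p0) x (p2 - p0), normalized and
// oriented so the z component is non-negative, matching the code above.
// Assumes the three points are not collinear (len > 0).
cv::Vec4f planeFromPoints(cv::Point3f p0, cv::Point3f p1, cv::Point3f p2)
{
    cv::Point3f n = (p1 - p0).cross(p2 - p0);
    float len = std::sqrt(n.dot(n));
    n *= 1.f / len;
    if (n.z < 0) n = -n;
    return cv::Vec4f(n.x, n.y, n.z, -n.dot(p0));
}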
Example #8
void PoseRecognizer :: setSkeleton(const ntk::RGBDImage& image)
{
	if (image.skeleton()) {
		jointPos[0] = image.skeleton()->getProjectedJoint(ntk::Skeleton::NTK_SKEL_LEFT_SHOULDER);
		jointPos[1] = image.skeleton()->getProjectedJoint(ntk::Skeleton::NTK_SKEL_LEFT_ELBOW);
		jointPos[2] = image.skeleton()->getProjectedJoint(ntk::Skeleton::NTK_SKEL_LEFT_HAND);
		jointPos[3] = image.skeleton()->getProjectedJoint(ntk::Skeleton::NTK_SKEL_RIGHT_SHOULDER);
		jointPos[4] = image.skeleton()->getProjectedJoint(ntk::Skeleton::NTK_SKEL_RIGHT_ELBOW);
		jointPos[5] = image.skeleton()->getProjectedJoint(ntk::Skeleton::NTK_SKEL_RIGHT_HAND);
	}
}
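Once the six projected joints are cached, a recognizer usually reduces them to angles. A hedged helper; the name, and the assumption that getProjectedJoint yields cv::Point3f image coordinates, are illustrative rather than taken from this project:

#include <algorithm>
#include <cmath>
#include <opencv2/core/core.hpp>

// Angle at the left elbow (slot 1) between shoulder (slot 0) and hand
// (slot 2), in radians, from the 2D part of the projected positions.
// Assumes the joints are distinct (non-zero vector lengths).
float leftElbowAngle(const cv::Point3f jointPos[6])
{
    cv::Point2f a(jointPos[0].x - jointPos[1].x, jointPos[0].y - jointPos[1].y);
    cv::Point2f b(jointPos[2].x - jointPos[1].x, jointPos[2].y - jointPos[1].y);
    float c = a.dot(b) / (std::sqrt(a.dot(a)) * std::sqrt(b.dot(b)));
    return std::acos(std::max(-1.f, std::min(1.f, c)));
}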
Example #9
File: tracker.cpp Project: mgsloan/kiimote
void trackHands(ntk::RGBDImage& img, Point3f& rpos, Point3f& bpos) {
  Mat1f& depth = img.depthRef();
  Mat1b& mask = img.depthMaskRef();
  const RGBDCalibration* calib = img.calibration();

  if (calib) {
    const Pose3D* rgb_pose = calib->rgb_pose;
    const Pose3D* depth_pose = calib->depth_pose;

    Mat rgb = img.rgb().clone();
    //blur(rgb, rgb, Size(5, 5));

    int wid = 64, ht = 64;
    int min_rad = 4;
    cv::Point2f fudge(8, 8);
    
    Mat1b result(Size(wid, ht), 0);
    Point2f top; 
    double rrad = min_rad,
           brad = min_rad;
    vector<Mat> hsvs;
  
    Point3f upleft = depth_pose->projectToImage(rpos) - Point3f(wid / 2, ht / 2, 0);
    if (upleft.x == upleft.x) { //!isNaN(upleft)
      filterColor(img, hsvs, result, upleft, fudge, 160, 10, 160, 256, 80, 256);
      findLargest(result, top, rrad);
      top.x += upleft.x; top.y += upleft.y;
      top *= STEP_SIZE;
      mapPos(depth_pose, depth, top, rpos);
      //printf("partial r %f %f %f \n", top.x, top.y, rrad);
      hsvs.clear();
    }

    upleft = depth_pose->projectToImage(bpos) - Point3f(wid / 2, ht / 2, 0);
    if (upleft.x == upleft.x) { //!isNaN(upleft)
      result = Mat1b(Size(wid, ht), 0);
      filterColor(img, hsvs, result, upleft, fudge, 70, 100, 50, 256, 50, 256);
      findLargest(result, top, brad);
      //displayBoolean(result);
      top.x += upleft.x; top.y += upleft.y;
      top *= STEP_SIZE;
      mapPos(depth_pose, depth, top, bpos);
//      printf("partial b %f %f %f \n", top.x, top.y, brad);
      hsvs.clear();
    }
    
    upleft = Point3f(0, 0, 0);
    if (rrad <= min_rad) {
      top = Point2f(0, 0);
      result = Mat1b(Size(depth.cols / STEP_SIZE, depth.rows / STEP_SIZE), 0);
      filterColor(img, hsvs, result, upleft, fudge, 160, 10, 160, 256, 80, 256);
      //Mat1b h(hsvs[0]);
      //displayVal(h);
      findLargest(result, top, rrad);
      top *= STEP_SIZE;
      mapPos(depth_pose, depth, top, rpos);
      //printf("full r %f %f %f \n", top.x, top.y, rrad);
    }
    if (brad <= min_rad) {
      top = Point2f(0, 0);
      result = Mat1b(Size(depth.cols / STEP_SIZE, depth.rows / STEP_SIZE), 0);
      filterColor(img, hsvs, result, upleft, fudge, 70, 100, 50, 256, 50, 256);
      findLargest(result, top, brad);
      top *= STEP_SIZE;
      mapPos(depth_pose, depth, top, bpos);
      //printf("full b %f %f %f \n", top.x, top.y, brad);
    }
  } else {
    bpos.x = bpos.y = bpos.z = 0;
    rpos.x = rpos.y = rpos.z = 0;
  }
}
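The upleft.x == upleft.x tests above are the usual self-comparison NaN check: projectToImage yields NaN when the tracked position is degenerate, and NaN is the only value that compares unequal to itself. An equivalent, more explicit spelling under IEEE-754, offered as a suggestion rather than project code:

#include <cmath>
#include <opencv2/core/core.hpp>

// std::isnan names what the x == x idiom exploits; the helper name is
// illustrative and not part of this project.
static inline bool isValidProjection(const cv::Point3f& p)
{
    return !std::isnan(p.x) && !std::isnan(p.y) && !std::isnan(p.z);
}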