Example no. 1
    system::error_code Application::prepare()
    {
        if(m_OptCameraName.empty())
        {
            // No camera name given: take the first camera the device manager enumerates.
            SI(hal::DeviceManager).enumerateDevices<hal::Camera>([this](intrusive_ptr<hal::Camera> const &c) -> bool
            {
                m_Camera = c;
                return false;
            });
        }
        else
        {
            m_Camera = SI(hal::DeviceManager).findDevice<hal::Camera>(m_OptCameraName);
        }
        
        if(!m_Camera || m_Camera->open())
            return base::makeErrorCode(base::kENoSuchDevice);
        
        // Apply preview/video sizes inside a single begin/end configuration block.
        m_Camera->beginConfiguration();
        m_Camera->setPreviewSize(Vec2i(m_OptCameraWidth, m_OptCameraHeight));
        if(!m_Camera->getSupportedVideoSizes().empty())
        {
            m_Camera->setVideoSize(Vec2i(m_OptCameraWidth, m_OptCameraHeight));
        }
        m_Camera->endConfiguration();
        
        m_PoseTracker = SI(cv::AlgorithmLibrary).createPoseTracker(m_OptTracker);
        m_PoseTracker->setImageSize(m_Camera->getPreviewSize());
        if(loadCameraCalibration(dataDirectory()/(std::string(m_Camera->name() + ".ccal"))))
        {
            m_PoseTracker->setCalibration(m_CameraCalibration);
            // Earlier error handling, kept commented out:
            //m_Camera->close();
            //m_Camera.reset();
            //return base::makeErrorCode(base::kENotFound);
        }

        if(m_PoseTracker->prepare())
        {
            m_Camera->close();
            m_Camera.reset();
            m_PoseTracker.reset();
            return base::makeErrorCode(base::kENotFound);
        }
        
        // Register this object as a camera delegate before starting the streams.
        m_Camera->cameraDelegate() += this;
        
        m_Camera->startPreview();
        m_Camera->startRecording();

        m_CameraServer.setCamera(m_Camera);
        m_CameraServer.start();
        m_RemoteControlServer.start();
        
        return base::makeErrorCode(base::kENoError);
    }
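
All of the examples in this listing call a loadCameraCalibration() helper whose body is not shown. Below is a minimal sketch of what such a loader could look like using OpenCV's cv::FileStorage; the std::string signature, the field names ("camera_matrix", "distortion_coefficients"), and the CameraCalibration constructor are assumptions, not the original API.

    // Hypothetical sketch only -- field names and the CameraCalibration ctor are assumed.
    #include <opencv2/core.hpp>
    
    bool Application::loadCameraCalibration(std::string const &path)
    {
        cv::FileStorage fs(path, cv::FileStorage::READ);
        if(!fs.isOpened())
            return false; // no calibration file for this camera
        
        cv::Mat cameraMatrix, distCoeffs;
        fs["camera_matrix"] >> cameraMatrix;           // 3x3 intrinsic matrix
        fs["distortion_coefficients"] >> distCoeffs;   // k1, k2, p1, p2, [k3]
        fs.release();
        
        if(cameraMatrix.empty() || distCoeffs.empty())
            return false;
        
        m_CameraCalibration = CameraCalibration(cameraMatrix, distCoeffs); // assumed ctor
        return true;
    }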
Example no. 2

ObjectDetector::ObjectDetector() : it(handle), _handle("~")
{
    // Subscribe to the drone's camera and navdata streams, and advertise detections.
    camera_image = it.subscribe("ardrone/image_raw", 1, &ObjectDetector::onImage, this);
    navdata_sub = handle.subscribe("ardrone/navdata", 1, &ObjectDetector::onNavdata, this);
    objects_pub = handle.advertise<cloudrone::Object>("cloudrone/objects", 1);
    
    // Read the calibration file path from the private parameter server.
    _handle.param("calibdata", calibdata, std::string(CAMERA_PARAMS_FILE));

    loadCameraCalibration(calibdata);

    // Two cascade classifiers, each paired with an integer label.
    cascadeDetectors.resize(2);
    cascadeDetectors[0].first.load("/home/ardrone/cascadeTreeLBP.xml");
    cascadeDetectors[0].second = 0;
    
    cascadeDetectors[1].first.load("/home/ardrone/cascadeStarNew.xml");
    cascadeDetectors[1].second = 1;

    view_size = Size(500, 500); // TEMP
    viewid = 0;
}
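
For context, a hedged sketch of what the onImage callback registered above might do with these cascades: convert the ROS frame via cv_bridge, run cv::CascadeClassifier::detectMultiScale for each (classifier, label) pair, and publish one cloudrone::Object per hit. The message field names (id, x, y, width, height) are assumptions, not taken from the original package.

    // Hypothetical sketch -- the cloudrone::Object layout is assumed.
    void ObjectDetector::onImage(const sensor_msgs::ImageConstPtr &msg)
    {
        // Convert the ROS image to a grayscale OpenCV view (cascades expect mono input).
        cv_bridge::CvImageConstPtr bridge = cv_bridge::toCvShare(msg, "mono8");
        
        for(auto &detector : cascadeDetectors)
        {
            std::vector<cv::Rect> hits;
            detector.first.detectMultiScale(bridge->image, hits, 1.1, 3);
            
            for(cv::Rect const &r : hits)
            {
                cloudrone::Object obj;    // assumed message fields below
                obj.id = detector.second;
                obj.x = r.x;
                obj.y = r.y;
                obj.width = r.width;
                obj.height = r.height;
                objects_pub.publish(obj);
            }
        }
    }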
Example no. 3
void sptam::sptam_node::onImages(
  const sensor_msgs::ImageConstPtr& l_image_msg, const sensor_msgs::CameraInfoConstPtr& left_info,
  const sensor_msgs::ImageConstPtr& r_image_msg, const sensor_msgs::CameraInfoConstPtr& right_info
)
{
  // Log the timestamp offset between the left and right images.
  ROS_INFO_STREAM("dt: " << (l_image_msg->header.stamp - r_image_msg->header.stamp).toSec());

  // Save current Time
  ros::Time currentTime = l_image_msg->header.stamp;

  // If using odometry, try to get a new estimate for the current pose.
  // if not using odometry, camera_to_odom is left blank.
  tf::StampedTransform camera_to_odom;
  if ( use_odometry_ )
  {
    if ( not getCameraInOdom(camera_to_odom, currentTime) )
      return;

    TFPose2CameraPose(odom_to_map_ * camera_to_odom, cameraPose_);
  }
  else
  {
    cv::Point3d currentCameraPosition;
    cv::Vec4d currentCameraOrientation;
    motionModel_->PredictNextCameraPose(currentCameraPosition, currentCameraOrientation);
    cameraPose_ = CameraPose( currentCameraPosition, currentCameraOrientation );
  }

  // If SPTAM has not been initialized yet, do it
  if ( sptam_ == nullptr )
  {
    ROS_INFO_STREAM("init calib");
    loadCameraCalibration(left_info, right_info);
  }

  // convert image to OpenCv cv::Mat format
  cv_bridge::CvImageConstPtr bridgeLeft_ptr = cv_bridge::toCvShare(l_image_msg, "rgb8");
  cv_bridge::CvImageConstPtr bridgeRight_ptr = cv_bridge::toCvShare(r_image_msg, "rgb8");

  // save images
  cv::Mat imageLeft = bridgeLeft_ptr->image;
  cv::Mat imageRight = bridgeRight_ptr->image;

  #ifdef SHOW_PROFILING
    double start, end;
    start = GetSeg();

    double startStep, endStep;
    startStep = GetSeg();
  #endif // SHOW_PROFILING

  // Extract keypoints and descriptors from both images, one worker thread per image.
  FeatureExtractorThread featureExtractorThreadLeft(imageLeft, *featureDetector_, *descriptorExtractor_);
  FeatureExtractorThread featureExtractorThreadRight(imageRight, *featureDetector_, *descriptorExtractor_);

  featureExtractorThreadLeft.WaitUntilFinished();
  const std::vector<cv::KeyPoint>& keyPointsLeft = featureExtractorThreadLeft.GetKeyPoints();
  const cv::Mat& descriptorsLeft = featureExtractorThreadLeft.GetDescriptors();

  featureExtractorThreadRight.WaitUntilFinished();
  const std::vector<cv::KeyPoint>& keyPointsRight = featureExtractorThreadRight.GetKeyPoints();
  const cv::Mat& descriptorsRight = featureExtractorThreadRight.GetDescriptors();

  ImageFeatures imageFeaturesLeft = ImageFeatures(imageLeft, keyPointsLeft, descriptorsLeft, mapper_params_.matchingCellSize);
  ImageFeatures imageFeaturesRight = ImageFeatures(imageRight, keyPointsRight, descriptorsRight, mapper_params_.matchingCellSize);

  #ifdef SHOW_PROFILING
    endStep = GetSeg();
    WriteToLog(" tk Extract: ", startStep, endStep);
  #endif

  // if the map was not initialized, try to build it
  if( not map_.nMapPoints() )
  {
    ROS_INFO("Trying to intialize map...");

    // Create keyFrame
    StereoFrame* frame = new StereoFrame(
      cameraPose_, cameraParametersLeft_,
      stereo_baseline_, cameraParametersRight_,
      imageFeaturesLeft, imageFeaturesRight, true
    );

    frame->SetId( 0 ); // Set Keyframe ID

    // Initialize MapMaker
    // TODO: keep this bool in a local variable so the surrounding "if" doesn't have to re-test map_.nMapPoints()
    /*bool isMapInitialized = */InitFromStereo( map_, *frame, imageLeft, *rowMatcher_);
  }
  // if the map is already initialized, do tracking
  else
  {
    cameraPose_ = sptam_->track(cameraPose_, imageFeaturesLeft, imageFeaturesRight, imageLeft, imageRight);

    #ifdef SHOW_PROFILING
      end = GetSeg();
      std::cout << GetSeg() << " tk trackingtotal: " << (end - start) << std::endl;

      std::stringstream message;
      message << std::fixed <<  GetSeg() << " tk trackingtotal: " << (end - start) << std::endl;
      Logger::Write( message.str() );
    #endif

    // fix the error between map and odom to correct the current pose.
    if ( use_odometry_ )
      fixOdomFrame( cameraPose_, camera_to_odom, currentTime );
    else
    {
      // When odometry is disabled, odom_to_map_ is repurposed to hold the map->cam transform.
      motionModel_->UpdateCameraPose(cameraPose_.GetPosition(), cameraPose_.GetOrientationQuaternion());

      tf::Transform cam_to_map;
      CameraPose2TFPose(cameraPose_, cam_to_map);

      tf::StampedTransform base_to_cam;
      transform_listener_.lookupTransform(camera_frame_, base_frame_, currentTime, base_to_cam);

      std::lock_guard<std::mutex> lock( odom_to_map_mutex_ );
      odom_to_map_ = cam_to_map * base_to_cam;
    }
  }

  // Publish Map To be drawn by rviz visualizer
  publishMap();

  // Publish the camera Pose
  publishPose( l_image_msg->header.seq, currentTime, cameraPose_ );
}
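
onImages() receives a time-matched stereo pair plus both CameraInfo messages in a single call; in ROS this is typically wired with message_filters and an ApproximateTime policy. A minimal sketch of that wiring follows; topic names, member names, and queue sizes are placeholders, not taken from the original node.

// Hypothetical wiring, e.g. in the node's constructor. The subscribers and
// synchronizer would be class members so they outlive this setup code.
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>

typedef message_filters::sync_policies::ApproximateTime<
  sensor_msgs::Image, sensor_msgs::CameraInfo,
  sensor_msgs::Image, sensor_msgs::CameraInfo
> StereoSyncPolicy;

// Assumed members:
//   message_filters::Subscriber<sensor_msgs::Image>      sub_img_l_, sub_img_r_;
//   message_filters::Subscriber<sensor_msgs::CameraInfo> sub_info_l_, sub_info_r_;
//   boost::shared_ptr<message_filters::Synchronizer<StereoSyncPolicy>> sync_;

sub_img_l_.subscribe(nh, "stereo/left/image_rect", 1);
sub_info_l_.subscribe(nh, "stereo/left/camera_info", 1);
sub_img_r_.subscribe(nh, "stereo/right/image_rect", 1);
sub_info_r_.subscribe(nh, "stereo/right/camera_info", 1);

sync_.reset(new message_filters::Synchronizer<StereoSyncPolicy>(
  StereoSyncPolicy(10), sub_img_l_, sub_info_l_, sub_img_r_, sub_info_r_));
sync_->registerCallback(boost::bind(&sptam::sptam_node::onImages, this, _1, _2, _3, _4));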