Example #1
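Trackbar callback: clamps the threshold parameters to valid values, re-runs the marker detection, and refreshes the preview windows.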
void cvTackBarEvents(int pos,void*) {
	if (i_threshold_param1 < 3) 
		i_threshold_param1 = 3;
	if (i_threshold_param1 % 2 != 1) 
		i_threshold_param1++;
	if (i_threshold_param2 < 1) 
		i_threshold_param2 = 1;

	threshold_param1 = i_threshold_param1;
	threshold_param2 = i_threshold_param2;
	marker_dector.setThresholdParams(threshold_param1, threshold_param2);

	//recompute
	marker_dector.detect(input_image, markers, camera_params);
	input_image.copyTo(input_image_copy);

	for(unsigned int i=0; i < markers.size(); i++)
		markers[i].draw(input_image_copy, Scalar(0, 0, 255), 2); // draw on the copy that is shown below

	//draw a 3d cube in each marker if there is 3d info
	if (camera_params.isValid())
		for(unsigned int i=0; i < markers.size(); i++)
			aruco::CvDrawingUtils::draw3dCube(input_image_copy, markers[i], camera_params);

	cv::imshow("in", input_image_copy);
	cv::imshow("thres", marker_dector.getThresholdedImage());
}
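// readParameters: opens the camera (for "live") or a video file, loads the camera intrinsics and board configuration, and reads the optional marker size.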
bool readParameters(int argc, char** argv)
{
    
    if (argc<4) {
        usage();
        return false;
    }
    
    TheInputVideo=argv[1];
    
    //read from camera or from a video file
    if (TheInputVideo=="live") TheVideoCapturer.open(0);
    else TheVideoCapturer.open(TheInputVideo);
    if (!TheVideoCapturer.isOpened())
    {
        cerr<<"Could not open video"<<endl;
        return false;
    }
    
    // read intrinsic file
    try {
        CameraParams.readFromXMLFile(argv[2]);
    } catch (std::exception &ex) {
        cout<<ex.what()<<endl;
        return false;
    }
    
    // read board file
    try {
        TheBoardConfig.readFromFile(argv[3]);
    } catch (std::exception &ex) {
        cout<<ex.what()<<endl;
        return false;
    }
    
    if(argc>4) TheMarkerSize=atof(argv[4]);
    else TheMarkerSize=1.;
    return true;
}
Example #3
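Trackbar callback for the board-detection demo: clamps the thresholds, re-detects the board, and draws a 3D axis once the detection confidence is high enough.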
/*!
 *
 */
static void cvTrackBarEvents(int ,void*)
{
  if (iThresParam1<3)
    iThresParam1=3;
  if (iThresParam1%2!=1)
    iThresParam1++;
  if (iThresParam2<1)  // clamp the trackbar value before it is copied below
    iThresParam2=1;

  dThresParam1=iThresParam1;
  dThresParam2=iThresParam2;
  boardDetector.getMarkerDetector().setThresholdParams(dThresParam1,dThresParam2);

  //recompute
  //Detection of the board
  float probDetect=boardDetector.detect( inpImg);
  inpImg.copyTo(inpImgCpy);
  if (params.isValid() && probDetect>0.2)
    aruco::CvDrawingUtils::draw3dAxis(inpImgCpy,boardDetector.getDetectedBoard(),params);


  cv::imshow("in",inpImgCpy);
  cv::imshow("thres",boardDetector.getMarkerDetector().getThresholdedImage());
}
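  // ROS image callback: detects ArUco markers, broadcasts a TF frame and publishes a PoseStamped for the selected marker, and republishes the annotated and thresholded images to subscribers.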
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    //        double ticksBefore = cv::getTickCount();
    static tf::TransformBroadcaster br;
    if(cam_info_received)
    {
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect
        mDetector.detect(inImage, markers, camParam, marker_size, false);
        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // only publishing the selected marker
          if(markers[i].id == marker_id)
          {
            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
            tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                  parent_name, child_name);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = parent_name;
            poseMsg.header.stamp = ros::Time::now();
            pose_pub.publish(poseMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,Scalar(0,0,255),2);
        }

        //draw a 3d cube in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            //CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = ros::Time::now();
          out_msg.encoding = sensor_msgs::image_encodings::RGB8; // image was converted to RGB8 above
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = ros::Time::now();
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }

        //            ROS_INFO("runtime: %f ms",
        //                     1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
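    // Variant of the previous callback: additionally chains the camera-to-reference and right-to-left transforms before publishing the pose, transform, and position messages.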
    void image_callback(const sensor_msgs::ImageConstPtr& msg)
    {
        static tf::TransformBroadcaster br;
        if(cam_info_received)
        {
            cv_bridge::CvImagePtr cv_ptr;
            try
            {
                cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
                inImage = cv_ptr->image;

                //detection results will go into "markers"
                markers.clear();
                //Ok, let's detect
                mDetector.detect(inImage, markers, camParam, marker_size, false);
                //for each marker, draw info and its boundaries in the image
                for(size_t i=0; i<markers.size(); ++i)
                {
                    // only publishing the selected marker
                    if(markers[i].id == marker_id)
                    {
                        tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
                        tf::StampedTransform cameraToReference;
                        cameraToReference.setIdentity();

                        if ( reference_frame != camera_frame )
                        {
                            getTransform(reference_frame,
                                         camera_frame,
                                         cameraToReference);
                        }

                        transform =
                            static_cast<tf::Transform>(cameraToReference)
                            * static_cast<tf::Transform>(rightToLeft)
                            * transform;

                        tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                              reference_frame, marker_frame);
                        br.sendTransform(stampedTransform);
                        geometry_msgs::PoseStamped poseMsg;
                        tf::poseTFToMsg(transform, poseMsg.pose);
                        poseMsg.header.frame_id = reference_frame;
                        poseMsg.header.stamp = ros::Time::now();
                        pose_pub.publish(poseMsg);

                        geometry_msgs::TransformStamped transformMsg;
                        tf::transformStampedTFToMsg(stampedTransform, transformMsg);
                        transform_pub.publish(transformMsg);

                        geometry_msgs::Vector3Stamped positionMsg;
                        positionMsg.header = transformMsg.header;
                        positionMsg.vector = transformMsg.transform.translation;
                        position_pub.publish(positionMsg);
                    }
                    // but drawing all the detected markers
                    markers[i].draw(inImage,cv::Scalar(0,0,255),2);
                }

                //draw a 3d cube in each marker if there is 3d info
                if(camParam.isValid() && marker_size!=-1)
                {
                    for(size_t i=0; i<markers.size(); ++i)
                    {
                        CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
                    }
                }

                if(image_pub.getNumSubscribers() > 0)
                {
                    //show input with augmented information
                    cv_bridge::CvImage out_msg;
                    out_msg.header.stamp = ros::Time::now();
                    out_msg.encoding = sensor_msgs::image_encodings::RGB8;
                    out_msg.image = inImage;
                    image_pub.publish(out_msg.toImageMsg());
                }

                if(debug_pub.getNumSubscribers() > 0)
                {
                    //show also the internal image resulting from the threshold operation
                    cv_bridge::CvImage debug_msg;
                    debug_msg.header.stamp = ros::Time::now();
                    debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
                    debug_msg.image = mDetector.getThresholdedImage();
                    debug_pub.publish(debug_msg.toImageMsg());
                }
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR("cv_bridge exception: %s", e.what());
                return;
            }
        }
    }
Example #6
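Stand-alone board-detection demo: parses arguments, opens a camera or video file, wires up the GUI trackbars, and runs the detection loop, optionally writing an annotated output video.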
/*!
 *
 */
int main(int argc,char **argv)
{
  readArguments(argc,argv);

  aruco::BoardConfiguration boardConf;
  boardConf.readFromFile(boC->sval[0]);

  cv::VideoCapture vcapture;

  //read from camera or from file
  if (strcmp(inp->sval[0], "live")==0)
  {
    vcapture.open(0);
    vcapture.set(CV_CAP_PROP_FRAME_WIDTH,  wid->ival[0]);
    vcapture.set(CV_CAP_PROP_FRAME_HEIGHT, hei->ival[0]);
    int val = CV_FOURCC('M', 'P', 'E', 'G');
    vcapture.set(CV_CAP_PROP_FOURCC, val);
  }
  else
    vcapture.open(inp->sval[0]);

  //check video is open
  if (!vcapture.isOpened())
  {
    std::cerr<<"Could not open video"<<std::endl;
    return -1;
  }

  //read first image to get the dimensions
  vcapture>>inpImg;

  //Open outputvideo
  cv::VideoWriter vWriter;
  if (out->count)
    vWriter.open(out->sval[0], CV_FOURCC('M','J','P','G'), fps->ival[0], inpImg.size());

  //read camera parameters if passed
  if (intr->count)
  {
    params.readFromXMLFile(intr->sval[0]);
    params.resize(inpImg.size());
  }

  //Create gui
  cv::namedWindow("thres", CV_GUI_NORMAL | CV_WINDOW_AUTOSIZE);
  cv::namedWindow("in", CV_GUI_NORMAL | CV_WINDOW_AUTOSIZE);
  cvMoveWindow("thres", 0, 0);
  cvMoveWindow("in", inpImg.cols + 5, 0);
  boardDetector.setParams(boardConf,params,msiz->dval[0]);
  boardDetector.getMarkerDetector().getThresholdParams( dThresParam1,dThresParam2);
//  boardDetector.getMarkerDetector().enableErosion(true);//for chessboards
  iThresParam1=dThresParam1;
  iThresParam2=dThresParam2;
  cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTrackBarEvents);
  cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTrackBarEvents);
  int index=0;

  ///////////////
  // Main Loop //
  ///////////////
  while(appRunnin)
  {
    inpImg.copyTo(inpImgCpy);
    index++;                                    //number of images captured
    double tick = static_cast<double>(cv::getTickCount());//for checking the speed
    float probDetect=boardDetector.detect(inpImg); //Detection of the board
    tick=(static_cast<double>(cv::getTickCount())-tick)/cv::getTickFrequency();
    std::cout<<"Time detection="<<1000*tick<<" milliseconds"<<std::endl;
    //print marker borders
    for (unsigned int i=0; i<boardDetector.getDetectedMarkers().size(); i++)
      boardDetector.getDetectedMarkers()[i].draw(inpImgCpy,cv::Scalar(0,0,255),1);

    //print board
    if (params.isValid())
    {
      if ( probDetect>thre->dval[0])
      {
        aruco::CvDrawingUtils::draw3dAxis( inpImgCpy,boardDetector.getDetectedBoard(),params);
      }
    }
    //DONE! Easy, right?

    //show input with augmented information and the thresholded image
    cv::imshow("in",inpImgCpy);
    cv::imshow("thres",boardDetector.getMarkerDetector().getThresholdedImage());

    // write to video if desired
    if (out->count)
    {
      //create a beautiful composed image showing the thresholded
      //first create a small version of the thresholded image
      cv::Mat smallThres;
      cv::resize(boardDetector.getMarkerDetector().getThresholdedImage(),smallThres,
                 cv::Size(inpImgCpy.cols/3,inpImgCpy.rows/3));
      cv::Mat small3C;
      cv::cvtColor(smallThres,small3C,CV_GRAY2BGR);
      cv::Mat roi=inpImgCpy(cv::Rect(0,0,inpImgCpy.cols/3,inpImgCpy.rows/3));
      small3C.copyTo(roi);
      vWriter<<inpImgCpy;
    }

    processKey(cv::waitKey(waitTime));//wait for key to be pressed
    appRunnin &= vcapture.grab();
    vcapture.retrieve(inpImg);
  }

  arg_freetable(argtable,sizeof(argtable)/sizeof(argtable[0]));
  return EXIT_SUCCESS;
}
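  // Heavily modified variant: publishes poses for a fixed whitelist of marker IDs and remaps position and yaw into a common coordinate system before publishing.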
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    static tf::TransformBroadcaster br;

    if(cam_info_received)
    {
      ros::Time curr_stamp(ros::Time::now());
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect
        mDetector.detect(inImage, markers, camParam, marker_size, false);
        //ROS_INFO("x: %0.3f y: %0.3f z: %0.3f", markers[0].Tvec.at<float>(0,0), markers[0].Tvec.at<float>(0,1), markers[0].Tvec.at<float>(0,2));
        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // publish pose messages for every marker in this ID whitelist
          // (originally: if(markers[i].id == marker_id) published only the selected marker)
          if(markers[i].id == 26  || markers[i].id == 35  || markers[i].id == 58  ||
             markers[i].id == 163 || markers[i].id == 640 || markers[i].id == 512 ||
             markers[i].id == 43  || markers[i].id == 291 || markers[i].id == 355)
          {
            // by Weiwei
            //ROS_INFO("x: %0.3f y: %0.3f z: %0.3f", markers[i].Tvec.at<float>(0,0), markers[i].Tvec.at<float>(0,1), markers[i].Tvec.at<float>(0,2));
            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);

            tf::StampedTransform cameraToReference;
            cameraToReference.setIdentity();

            if ( reference_frame != camera_frame )
            {
              getTransform(reference_frame,
                           camera_frame,
                           cameraToReference);
            }

            transform = 
              static_cast<tf::Transform>(cameraToReference) 
              * static_cast<tf::Transform>(rightToLeft) 
              * transform;

            tf::StampedTransform stampedTransform(transform, curr_stamp,
                                                  reference_frame, marker_frame);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = reference_frame;
            poseMsg.header.stamp = curr_stamp;

            double aruco_roll_, aruco_pitch_, aruco_yaw_;
            tf::Quaternion aruco_quat_;
            tf::quaternionMsgToTF(poseMsg.pose.orientation, aruco_quat_);
            tf::Matrix3x3(aruco_quat_).getRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);

            //ROS_INFO("April Tag RPY: [%0.3f, %0.3f, %0.3f]", aruco_roll_*180/3.1415926, aruco_pitch_*180/3.1415926, aruco_yaw_*180/3.1415926);
            //----------------- unify the coordinate systems of the pose -----------------
            aruco_yaw_ = aruco_yaw_*180/3.1415926;   // yaw is in degrees from here on
            printf("Original [x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n", poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);

            if (markers[i].id == 26 || markers[i].id == 58)
            {
              float PI = 3.1415926;
              float ang = PI*3/4;
              float x_0 = -0.045;//-0.015;
              float y_0 = -0.015;//0.045;
              

              poseMsg.pose.position.x = x_0 + poseMsg.pose.position.x;// + poseMsg.pose.position.x * cos(-ang) - poseMsg.pose.position.y * sin(-ang); 
              poseMsg.pose.position.y = y_0 + poseMsg.pose.position.y;// + poseMsg.pose.position.x * sin(-ang) + poseMsg.pose.position.y * cos(-ang);

              //printf("[x, y] = [%0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y);

              aruco_yaw_ = aruco_yaw_ + ang*180/PI; 
              printf("[x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);

              //printf("-----------unify the coordinate system ---------------------\n"); 
            }

            //printf("------------------------------------------------------------------\n-----------------aruco_yaw_ = %0.3f\n", aruco_yaw_);
            //printf("------------------------------------------------------------------\n");

            double temp_x = poseMsg.pose.position.x;
            double temp_y = poseMsg.pose.position.y;

            poseMsg.pose.position.x = -temp_y;
            poseMsg.pose.position.y = -temp_x;

            // createQuaternionFromRPY expects radians, so convert the yaw back from degrees
            tf::Quaternion quat = tf::createQuaternionFromRPY(aruco_roll_, aruco_pitch_, aruco_yaw_*3.1415926/180);
            
            poseMsg.pose.orientation.x = quat.x();
            poseMsg.pose.orientation.y = quat.y();
            poseMsg.pose.orientation.z = quat.z();
            poseMsg.pose.orientation.w = quat.w();   

            pose_pub.publish(poseMsg);


            geometry_msgs::TransformStamped transformMsg;
            tf::transformStampedTFToMsg(stampedTransform, transformMsg);
            transform_pub.publish(transformMsg);

            geometry_msgs::Vector3Stamped positionMsg;
            positionMsg.header = transformMsg.header;
            positionMsg.vector = transformMsg.transform.translation;
            position_pub.publish(positionMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,cv::Scalar(0,0,255),2);
        }

        //draw a 3d cube in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = curr_stamp;
          out_msg.encoding = sensor_msgs::image_encodings::RGB8;
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = curr_stamp;
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
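// Two-marker variant: besides the per-marker poses, it computes the 3D midpoint between two detected markers, projects it into the image, and marks it with a circle.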
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
  double ticksBefore = cv::getTickCount();
  static tf::TransformBroadcaster br;
  if(cam_info_received)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
      inImage = cv_ptr->image;

      if(normalizeImageIllumination)
      {
        cv::Mat inImageNorm;
        pal_vision_util::dctNormalization(inImage, inImageNorm, dctComponentsToRemove);
        inImage = inImageNorm;
      }

      //detection results will go into "markers"
      markers.clear();
      //Ok, let's detect
      mDetector.detect(inImage, markers, camParam, marker_size);
      //for each marker, draw info and its boundaries in the image
      for(unsigned int i=0; i<markers.size(); ++i)
      {
        // only publishing the selected marker
        if ( markers[i].id == marker_id1 )
        {
          tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
          br.sendTransform(tf::StampedTransform(transform, ros::Time::now(),
                                                parent_name, child_name1));
          geometry_msgs::Pose poseMsg;
          tf::poseTFToMsg(transform, poseMsg);
          pose_pub1.publish(poseMsg);
        }
        else if ( markers[i].id == marker_id2 )
        {
          tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
          br.sendTransform(tf::StampedTransform(transform, ros::Time::now(),
                                                parent_name, child_name2));
          geometry_msgs::Pose poseMsg;
          tf::poseTFToMsg(transform, poseMsg);
          pose_pub2.publish(poseMsg);
        }

        // but drawing all the detected markers
        markers[i].draw(inImage,Scalar(0,0,255),2);
      }

      //paint a circle in the center of the image
      cv::circle(inImage, cv::Point(inImage.cols/2, inImage.rows/2), 4, cv::Scalar(0,255,0), 1);

      if ( markers.size() == 2 )
      {
        float x[2], y[2], u[2], v[2];
        for (unsigned int i = 0; i < 2; ++i)
        {
          ROS_DEBUG_STREAM("Marker(" << i << ") at camera coordinates = ("
                           << markers[i].Tvec.at<float>(0,0) << ", "
                           << markers[i].Tvec.at<float>(1,0) << ", "
                           << markers[i].Tvec.at<float>(2,0));
          //normalized coordinates of the marker
          x[i] = markers[i].Tvec.at<float>(0,0)/markers[i].Tvec.at<float>(2,0);
          y[i] = markers[i].Tvec.at<float>(1,0)/markers[i].Tvec.at<float>(2,0);
          //undistorted pixel
          u[i] = x[i]*camParam.CameraMatrix.at<float>(0,0) +
              camParam.CameraMatrix.at<float>(0,2);
          v[i] = y[i]*camParam.CameraMatrix.at<float>(1,1) +
              camParam.CameraMatrix.at<float>(1,2);
        }

        ROS_DEBUG_STREAM("Mid point between the two markers in the image = ("
                         << (x[0]+x[1])/2 << ", " << (y[0]+y[1])/2 << ")");

        //              //paint a circle in the mid point of the normalized coordinates of both markers
        //              cv::circle(inImage,
        //                         cv::Point((u[0]+u[1])/2, (v[0]+v[1])/2),
        //                         3, cv::Scalar(0,0,255), CV_FILLED);


        //compute the midpoint in 3D:
        float midPoint3D[3]; //3D point
        for (unsigned int i = 0; i < 3; ++i )
          midPoint3D[i] = ( markers[0].Tvec.at<float>(i,0) +
                            markers[1].Tvec.at<float>(i,0) ) / 2;
        //now project the 3D mid point to normalized coordinates
        float midPointNormalized[2];
        midPointNormalized[0] = midPoint3D[0]/midPoint3D[2]; //x
        midPointNormalized[1] = midPoint3D[1]/midPoint3D[2]; //y
        u[0] = midPointNormalized[0]*camParam.CameraMatrix.at<float>(0,0) +
            camParam.CameraMatrix.at<float>(0,2);
        v[0] = midPointNormalized[1]*camParam.CameraMatrix.at<float>(1,1) +
            camParam.CameraMatrix.at<float>(1,2);

        ROS_DEBUG_STREAM("3D Mid point between the two markers in undistorted pixel coordinates = ("
                         << u[0] << ", " << v[0] << ")");

        //paint a circle in the mid point of the normalized coordinates of both markers
        cv::circle(inImage,
                   cv::Point(u[0], v[0]),
                   3, cv::Scalar(0,0,255), CV_FILLED);

      }

      //draw a 3d cube in each marker if there is 3d info
      if(camParam.isValid() && marker_size!=-1)
      {
        for(unsigned int i=0; i<markers.size(); ++i)
        {
          CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
        }
      }

      if(image_pub.getNumSubscribers() > 0)
      {
        //show input with augmented information
        cv_bridge::CvImage out_msg;
        out_msg.header.stamp = ros::Time::now();
        out_msg.encoding = sensor_msgs::image_encodings::RGB8;
        out_msg.image = inImage;
        image_pub.publish(out_msg.toImageMsg());
      }

      if(debug_pub.getNumSubscribers() > 0)
      {
        //show also the internal image resulting from the threshold operation
        cv_bridge::CvImage debug_msg;
        debug_msg.header.stamp = ros::Time::now();
        debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
        debug_msg.image = mDetector.getThresholdedImage();
        debug_pub.publish(debug_msg.toImageMsg());
      }

      ROS_DEBUG("runtime: %f ms",
                1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }
  }
}
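  // Marker-array variant: collects all detections into one aruco_msgs::MarkerArray message stamped in a common reference frame.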
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    static tf::TransformBroadcaster br;
    if(cam_info_received_)
    {
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage_ = cv_ptr->image;

        //clear out previous detection results
        markers_.clear();
        marker_msg_->markers.clear();

        //Ok, let's detect
        mDetector_.detect(inImage_, markers_, camParam_, marker_size_, false);
        marker_msg_->markers.resize(markers_.size());

        ros::Time curr_stamp(ros::Time::now());
        marker_msg_->header.stamp = curr_stamp;
        marker_msg_->header.seq++;

        //get the current transform from the camera frame to output ref frame
        tf::StampedTransform cameraToReference;
        cameraToReference.setIdentity();

        if ( reference_frame_ != camera_frame_ )
        {
          getTransform(reference_frame_,
              camera_frame_,
              cameraToReference);
        }

        //Now find the transform for each detected marker
        for(size_t i=0; i < markers_.size(); ++i)
        {
          tf::Transform transform = aruco_ros::arucoMarker2Tf(markers_[i]);
          transform = static_cast<tf::Transform>(cameraToReference) * transform;

          //Maybe change this to broadcast separate transforms for each marker?
          //tf::StampedTransform stampedTransform(transform, curr_stamp,
              //reference_frame_, marker_frame_);
          //br.sendTransform(stampedTransform);

          aruco_msgs::Marker & marker_i = marker_msg_->markers.at(i);
          tf::poseTFToMsg(transform, marker_i.pose.pose);
          marker_i.header.frame_id = reference_frame_;
          marker_i.header.stamp = curr_stamp;
          marker_i.id = markers_.at(i).id;
          marker_i.confidence = 1.0;

          markers_[i].draw(inImage_,cv::Scalar(0,0,255),2);
        }

        //publish marker array
        if (marker_msg_->markers.size() > 0)
          marker_pub_.publish(marker_msg_);

        //draw a 3d cube in each marker if there is 3d info
        if(camParam_.isValid() && marker_size_!=-1)
        {
          for(size_t i=0; i<markers_.size(); ++i)
            aruco::CvDrawingUtils::draw3dAxis(inImage_, markers_[i], camParam_);
        }

        if(image_pub_.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = curr_stamp;
          out_msg.encoding = sensor_msgs::image_encodings::RGB8;
          out_msg.image = inImage_;
          image_pub_.publish(out_msg.toImageMsg());
        }

        if(debug_pub_.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = curr_stamp;
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector_.getThresholdedImage();
          debug_pub_.publish(debug_msg.toImageMsg());
        }
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
      }
    }
  }
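		// Board-detection callback (ar_sys): publishes the board pose, transform, position, and marker size, plus the raw Rvec/Tvec of the detected board when subscribed.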
		void image_callback(const sensor_msgs::ImageConstPtr& msg)
		{
			static tf::TransformBroadcaster br;

			if(!cam_info_received) return;

			cv_bridge::CvImagePtr cv_ptr;
			try
			{
				cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
				inImage = cv_ptr->image;
				resultImg = cv_ptr->image.clone();

				//detection results will go into "markers"
				markers.clear();
				//Ok, let's detect
				mDetector.detect(inImage, markers, camParam, marker_size, false);
				//Detection of the board
				float probDetect=the_board_detector.detect(markers, the_board_config, the_board_detected, camParam, marker_size);
				if (probDetect > 0.0)
				{
					foundBoard = true;
					tf::Transform transform = ar_sys::getTf(the_board_detected.Rvec, the_board_detected.Tvec);

					tf::StampedTransform stampedTransform(transform, msg->header.stamp, msg->header.frame_id, board_frame);

					if (publish_tf)
						br.sendTransform(stampedTransform);

					geometry_msgs::PoseStamped poseMsg;
					tf::poseTFToMsg(transform, poseMsg.pose);
					poseMsg.header.frame_id = msg->header.frame_id;
					poseMsg.header.stamp = msg->header.stamp;
					pose_pub.publish(poseMsg);

					geometry_msgs::TransformStamped transformMsg;
					tf::transformStampedTFToMsg(stampedTransform, transformMsg);
					transform_pub.publish(transformMsg);

					geometry_msgs::Vector3Stamped positionMsg;
					positionMsg.header = transformMsg.header;
					positionMsg.vector = transformMsg.transform.translation;
					position_pub.publish(positionMsg);
					
					std_msgs::Float32 boardSizeMsg;
					boardSizeMsg.data=the_board_detected[0].ssize;
					boardSize_pub.publish(boardSizeMsg);
				}
				// publish the raw rotation/translation vectors of the detected board
				if(rvec_pub.getNumSubscribers() > 0 && foundBoard)
				{
					cv_bridge::CvImage rvecMsg;
					rvecMsg.header.frame_id = msg->header.frame_id;
					rvecMsg.header.stamp = msg->header.stamp;
					rvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
					rvecMsg.image = the_board_detected[0].Rvec;
					rvec_pub.publish(rvecMsg.toImageMsg());
				}
	
				if(tvec_pub.getNumSubscribers() > 0 && foundBoard)
				{
					cv_bridge::CvImage tvecMsg;
					tvecMsg.header.frame_id = msg->header.frame_id;
					tvecMsg.header.stamp = msg->header.stamp;
					tvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
					tvecMsg.image = the_board_detected[0].Tvec;
					tvec_pub.publish(tvecMsg.toImageMsg());
				}

				//for each marker, draw info and its boundaries in the image
				for(size_t i=0; draw_markers && i < markers.size(); ++i)
				{
					markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
				}


				if(camParam.isValid() && marker_size != -1)
				{
					//draw a 3d cube in each marker if there is 3d info
					for(size_t i=0; i<markers.size(); ++i)
					{
						if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
						if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
					}
					//draw board axis
					if (probDetect > 0.0)
					{
						CvDrawingUtils::draw3dAxis(resultImg, the_board_detected, camParam);
					}
				}

				if(image_pub.getNumSubscribers() > 0)
				{
					//show input with augmented information
					cv_bridge::CvImage out_msg;
					out_msg.header.frame_id = msg->header.frame_id;
					out_msg.header.stamp = msg->header.stamp;
					out_msg.encoding = sensor_msgs::image_encodings::RGB8;
					out_msg.image = resultImg;
					image_pub.publish(out_msg.toImageMsg());
				}

				if(debug_pub.getNumSubscribers() > 0)
				{
					//show also the internal image resulting from the threshold operation
					cv_bridge::CvImage debug_msg;
					debug_msg.header.frame_id = msg->header.frame_id;
					debug_msg.header.stamp = msg->header.stamp;
					debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
					debug_msg.image = mDetector.getThresholdedImage();
					debug_pub.publish(debug_msg.toImageMsg());
				}
			}
			catch (cv_bridge::Exception& e)
			{
				ROS_ERROR("cv_bridge exception: %s", e.what());
				return;
			}
		}
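		// Multi-board variant: one detection pass with the smallest configured marker size, then a board fit and a stamped transform per configured board.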
		void image_callback(const sensor_msgs::ImageConstPtr& msg)
		{
			if(!cam_info_received) return;

			cv_bridge::CvImagePtr cv_ptr;
			try
			{
				cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
				inImage = cv_ptr->image;
				resultImg = cv_ptr->image.clone();

				//detection results will go into "markers"
				markers.clear();

				//Ok, let's detect with the smallest marker size among all boards
				double min_size = boards[0].marker_size;
				for (size_t board_index = 1; board_index < boards.size(); board_index++)
					if (min_size > boards[board_index].marker_size) min_size = boards[board_index].marker_size;
				mDetector.detect(inImage, markers, camParam, min_size, false);


				for (size_t board_index = 0; board_index < boards.size(); board_index++)
				{
					Board board_detected;

					//Detection of the board
					float probDetect = the_board_detector.detect(markers, boards[board_index].config, board_detected, camParam, boards[board_index].marker_size);
					if (probDetect > 0.0)
					{
						tf::Transform transform = ar_sys::getTf(board_detected.Rvec, board_detected.Tvec);

						tf::StampedTransform stampedTransform(transform, ros::Time::now(), "apollon_cam_right", boards[board_index].name +"_right");

						//_tfBraodcaster.sendTransform(stampedTransform);		// from phillip

						/*geometry_msgs::PoseStamped poseMsg;
						tf::poseTFToMsg(transform, poseMsg.pose);
						poseMsg.header.frame_id = msg->header.frame_id;
						poseMsg.header.stamp = msg->header.stamp;
						pose_pub.publish(poseMsg);*/

						geometry_msgs::TransformStamped transformMsg;
						tf::transformStampedTFToMsg(stampedTransform, transformMsg);
						transform_pub.publish(transformMsg);

						/*geometry_msgs::Vector3Stamped positionMsg;
						positionMsg.header = transformMsg.header;
						positionMsg.vector = transformMsg.transform.translation;
						position_pub.publish(positionMsg);*/

						if(camParam.isValid())
						{
							//draw board axis
							CvDrawingUtils::draw3dAxis(resultImg, board_detected, camParam);
						}
					}
				}

				//for each marker, draw info and its boundaries in the image
				for(size_t i=0; draw_markers && i < markers.size(); ++i)
				{
					markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
				}

				if(camParam.isValid())
				{
					//draw a 3d cube in each marker if there is 3d info
					for(size_t i=0; i<markers.size(); ++i)
					{
						if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
						if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
					}
				}

				if(image_pub.getNumSubscribers() > 0)
				{
					//show input with augmented information
					cv_bridge::CvImage out_msg;
					out_msg.header.frame_id = msg->header.frame_id;
					out_msg.header.stamp = msg->header.stamp;
					out_msg.encoding = sensor_msgs::image_encodings::RGB8;
					out_msg.image = resultImg;
					image_pub.publish(out_msg.toImageMsg());
				}

				if(debug_pub.getNumSubscribers() > 0)
				{
					//show also the internal image resulting from the threshold operation
					cv_bridge::CvImage debug_msg;
					debug_msg.header.frame_id = msg->header.frame_id;
					debug_msg.header.stamp = msg->header.stamp;
					debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
					debug_msg.image = mDetector.getThresholdedImage();
					debug_pub.publish(debug_msg.toImageMsg());
				}
			}
			catch (cv_bridge::Exception& e)
			{
				ROS_ERROR("cv_bridge exception: %s", e.what());
				return;
			}
		}
Example #12
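Implementation of aruco::Marker::calculateExtrinsics: validates the camera parameters and forwards to the overload that takes an explicit camera matrix and distortion coefficients.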
 void Marker::calculateExtrinsics(float markerSize,const aruco::CameraParameters &CP)throw(cv::Exception)
 {
     if (!CP.isValid()) throw cv::Exception(9004,"!CP.isValid(): invalid camera parameters. It is not possible to calculate extrinsics","calculateExtrinsics",__FILE__,__LINE__);
     calculateExtrinsics( markerSize,CP.CameraMatrix,CP.Distorsion);
 }
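A minimal call sketch of the function above (hypothetical helper name; assumes a marker already detected by an aruco::MarkerDetector, valid loaded intrinsics, and a 5 cm marker):

#include <aruco/aruco.h>
#include <iostream>

void printMarkerPose(aruco::Marker &m, const aruco::CameraParameters &cam)
{
	// fills m.Rvec and m.Tvec with the marker pose in camera coordinates;
	// throws cv::Exception if cam is invalid, as shown above
	m.calculateExtrinsics(0.05f, cam); // 0.05f = marker side length in meters (assumption)
	std::cout << "Tvec = " << m.Tvec.t() << std::endl;
}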
Example #13
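Stand-alone webcam demo: opens the camera, loads intrinsics from camera.yml, wires up threshold trackbars, then detects markers frame by frame and draws their borders, a 3D cube, and an axis.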
int main() {

	// open the camera
	video_capture.open(0);
	if (!video_capture.isOpened()){
		std::cerr << "Could not open video" << endl;
		return -1;
	}

	// grab the first frame, used to set up the parameters below
	video_capture >> input_image;

	// read the camera parameters
	camera_params.readFromXMLFile("camera.yml");
	camera_params.resize(input_image.size());

	// create the display windows
	cv::namedWindow("thres",1);
	cv::namedWindow("in",1);

	marker_dector.getThresholdParams(threshold_param1, threshold_param2);
	i_threshold_param1 = threshold_param1;
	i_threshold_param2 = threshold_param2;

	cv::createTrackbar("ThresParam1", "in",&i_threshold_param1, 13, cvTackBarEvents);
	cv::createTrackbar("ThresParam2", "in",&i_threshold_param2, 13, cvTackBarEvents);

	char key=0;
	int index=0;

	//capture until ESC is pressed or the video ends
	while( key!=27 && video_capture.grab())
	{
		//copy image
		video_capture.retrieve(input_image);
		
		//number of images captured
		index++; 

		//for checking the speed
		double tick = (double)cv::getTickCount();
		//Detection of markers in the image passed
		marker_dector.detect(input_image, markers, camera_params, marker_size);
		//check the speed by calculating the mean speed of all iterations
		average_time.first += ((double)cv::getTickCount() - tick) / cv::getTickFrequency();
		average_time.second++;
		std::cout << "Time detection=" << 1000 * average_time.first / average_time.second << " milliseconds" << endl;

		//print marker info and draw the markers in image
		input_image.copyTo(input_image_copy);
		for(unsigned int i = 0; i < markers.size(); i++){
			std::cout << markers[i] << std::endl;
			markers[i].draw(input_image, Scalar(0, 0, 255), 2);
		}

		//draw a 3d cube in each marker if there is 3d info
		if ( camera_params.isValid())
			for(unsigned int i = 0; i < markers.size(); i++){
				aruco::CvDrawingUtils::draw3dCube(input_image, markers[i], camera_params);
				aruco::CvDrawingUtils::draw3dAxis(input_image, markers[i], camera_params);
			}

		//DONE! Easy, right?
		cout << endl << endl << endl;

		//show input with augmented information and the thresholded image
		cv::imshow("in", input_image);
		cv::imshow("thres", marker_dector.getThresholdedImage());

		key=cv::waitKey(0);//wait for key to be pressed
	}
	return 0;
}
Example #14
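Ogre3D AR setup: creates the render window and an orthographic camera driven by the ArUco projection matrix, streams the camera image into a background texture, and attaches an animated Sinbad mesh per marker.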
int initOgreAR(aruco::CameraParameters camParams, unsigned char* buffer, std::string resourcePath)
{
	/// INIT OGRE FUNCTIONS
#ifdef _WIN32
  	root = new Ogre::Root(resourcePath + "plugins_win.cfg", resourcePath + "ogre_win.cfg");
#elif __x86_64__ || __ppc64__
	root = new Ogre::Root(resourcePath + "plugins_x64.cfg", resourcePath + "ogre.cfg");
#else
	root = new Ogre::Root(resourcePath + "plugins.cfg", resourcePath + "ogre.cfg");
#endif
  	if (!root->showConfigDialog()) return -1;
	Ogre::SceneManager* smgr = root->createSceneManager(Ogre::ST_GENERIC);


	/// CREATE WINDOW, CAMERA AND VIEWPORT
    Ogre::RenderWindow* window = root->initialise(true);
	Ogre::Camera *camera;
	Ogre::SceneNode* cameraNode;
	camera = smgr->createCamera("camera");
	camera->setNearClipDistance(0.01f);
	camera->setFarClipDistance(10.0f);
	camera->setProjectionType(Ogre::PT_ORTHOGRAPHIC);
	camera->setPosition(0, 0, 0);
	camera->lookAt(0, 0, 1);
	double pMatrix[16];
	camParams.OgreGetProjectionMatrix(camParams.CamSize,camParams.CamSize, pMatrix, 0.05,10, false);
	Ogre::Matrix4 PM(pMatrix[0], pMatrix[1], pMatrix[2] , pMatrix[3],
			pMatrix[4], pMatrix[5], pMatrix[6] , pMatrix[7],
			pMatrix[8], pMatrix[9], pMatrix[10], pMatrix[11],
			pMatrix[12], pMatrix[13], pMatrix[14], pMatrix[15]);
	camera->setCustomProjectionMatrix(true, PM);
	camera->setCustomViewMatrix(true, Ogre::Matrix4::IDENTITY);
	window->addViewport(camera);
	cameraNode = smgr->getRootSceneNode()->createChildSceneNode("cameraNode");
	cameraNode->attachObject(camera);


	/// CREATE BACKGROUND FROM CAMERA IMAGE
	int width = camParams.CamSize.width;
	int height = camParams.CamSize.height;
	// create background camera image
	mPixelBox = Ogre::PixelBox(width, height, 1, Ogre::PF_R8G8B8, buffer);
	// Create Texture
	mTexture = Ogre::TextureManager::getSingleton().createManual("CameraTexture",Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
		      Ogre::TEX_TYPE_2D,width,height,0,Ogre::PF_R8G8B8,Ogre::TU_DYNAMIC_WRITE_ONLY_DISCARDABLE);

	//Create Camera Material
	Ogre::MaterialPtr material = Ogre::MaterialManager::getSingleton().create("CameraMaterial", Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME);
	Ogre::Technique *technique = material->createTechnique();
	technique->createPass();
	material->getTechnique(0)->getPass(0)->setLightingEnabled(false);
	material->getTechnique(0)->getPass(0)->setDepthWriteEnabled(false);
	material->getTechnique(0)->getPass(0)->createTextureUnitState("CameraTexture");

	Ogre::Rectangle2D* rect = new Ogre::Rectangle2D(true);
	rect->setCorners(-1.0, 1.0, 1.0, -1.0);
	rect->setMaterial("CameraMaterial");

	// Render the background before everything else
	rect->setRenderQueueGroup(Ogre::RENDER_QUEUE_BACKGROUND);

	// Hacky, but we need to set the bounding box to something big; use an infinite AABB so it always stays visible
	Ogre::AxisAlignedBox aabInf;
	aabInf.setInfinite();
	rect->setBoundingBox(aabInf);

	// Attach background to the scene
	Ogre::SceneNode* node = smgr->getRootSceneNode()->createChildSceneNode("Background");
	node->attachObject(rect);


	/// CREATE SIMPLE OGRE SCENE
	// add sinbad.mesh
	Ogre::ResourceGroupManager::getSingleton().addResourceLocation(resourcePath + "Sinbad.zip", "Zip", "Popular");
 	Ogre::ResourceGroupManager::getSingleton().initialiseAllResourceGroups();
	for(int i=0; i<MAX_MARKERS; i++) {
	  Ogre::String entityName = "Marker_" + Ogre::StringConverter::toString(i);
	  Ogre::Entity* ogreEntity = smgr->createEntity(entityName, "Sinbad.mesh");
	  Ogre::Real offset = ogreEntity->getBoundingBox().getHalfSize().y;
	  ogreNode[i] = smgr->getRootSceneNode()->createChildSceneNode();
	  // attach the entity to a child node to correct its position (this way, the entity's origin is at Sinbad's feet)
	  Ogre::SceneNode *ogreNodeChild = ogreNode[i]->createChildSceneNode();
	  ogreNodeChild->attachObject(ogreEntity);
	  // Sinbad is placed along Y axis, we need to rotate to put it along Z axis so it stands up over the marker
	  // first rotate along X axis, then add offset in Z dir so it is over the marker and not in the middle of it
	  ogreNodeChild->rotate(Ogre::Vector3(1,0,0), Ogre::Radian(Ogre::Degree(90)));
	  ogreNodeChild->translate(0,0,offset,Ogre::Node::TS_PARENT);
	  // mesh is too big, rescale!
	  const float scale = 0.006675f;
	  ogreNode[i]->setScale(scale, scale, scale);

	    // Init animation
	  ogreEntity->getSkeleton()->setBlendMode(Ogre::ANIMBLEND_CUMULATIVE);
	  if(i==0)
	  {
        baseAnim[i] = ogreEntity->getAnimationState("HandsClosed");
        topAnim[i] = ogreEntity->getAnimationState("HandsRelaxed");
	  }
	  else if(i==1)
	  {
        baseAnim[i] = ogreEntity->getAnimationState("Dance");
        topAnim[i] = ogreEntity->getAnimationState("Dance");
	  }
	  else if(i==2)
	  {
        baseAnim[i] = ogreEntity->getAnimationState("RunBase");
        topAnim[i] = ogreEntity->getAnimationState("RunTop");
	  }
	  else
	  {
        baseAnim[i] = ogreEntity->getAnimationState("IdleBase");
        topAnim[i] = ogreEntity->getAnimationState("IdleTop");
	  }
	  baseAnim[i]->setLoop(true);
	  topAnim[i]->setLoop(true);
	  baseAnim[i]->setEnabled(true);
	  topAnim[i]->setEnabled(true);
	}


 	/// KEYBOARD INPUT READING
 	size_t windowHnd = 0;
 	window->getCustomAttribute("WINDOW", &windowHnd);
 	im = OIS::InputManager::createInputSystem(windowHnd);
 	keyboard = static_cast<OIS::Keyboard*>(im->createInputObject(OIS::OISKeyboard, true));

	return 1;
}