static void cleanup(void)
{
    // Clean up the grayscale image.
    cvReleaseImageHeader(&calibImage);
    if (arIPI)
    {
        arImageProcFinal(arIPI);
        arIPI = NULL;
    }

    // Free space for results.
    if (corners)
    {
        free(corners);
        corners = NULL;
    }

    if (cornerSet)
    {
        free(cornerSet);
        cornerSet = NULL;
    }

    arVideoCapStop();
    arVideoClose();
    argCleanup();

    if (cwd)
    {
        free(cwd);
        cwd = NULL;
    }

    exit(0);
}
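A sketch of the allocations this handler undoes, assuming OpenCV's C API and hypothetical size globals (xsize, ysize, cornerCount, captureCount) modeled on the ARToolKit calibration sample:

/* Grayscale header that each captured frame is wrapped in. */
calibImage = cvCreateImageHeader(cvSize(xsize, ysize), IPL_DEPTH_8U, 1);

/* Corner buffers sized for the chessboard being detected. */
corners   = (CvPoint2D32f *)malloc(sizeof(CvPoint2D32f) * cornerCount);
cornerSet = (CvPoint2D32f *)malloc(sizeof(CvPoint2D32f) * cornerCount * captureCount);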
Example #2
/* cleanup function called when program exits */
static void cleanup(void)
{
    arParamLTFree(&gCparamLT);
    arVideoCapStop();
    arVideoClose();
    argCleanup();
}
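The "called when program exits" contract is typically set up with atexit(); a minimal sketch, with the main-loop contents left as placeholders:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    if (atexit(cleanup) != 0) {   /* run cleanup() on every exit() path */
        fprintf(stderr, "atexit registration failed\n");
        return EXIT_FAILURE;
    }
    /* ... video setup and main loop ... */
    return 0;   /* cleanup() also runs on a normal return from main */
}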
Example #3
void Cleanup(void)
{
	arVideoCapStop();
	arVideoClose();
	argCleanup();
}

Example #4
/* cleanup function called when program exits */
static void cleanup(void)
{
    arVideoCapStop();
    arVideoClose();
    argCleanup();
    // Shut down MyKinect.
    g_MyKinect.Exit();
}
Example #5
static void cleanup(void)
{
    ar2VideoCapStop(vidL);
    ar2VideoCapStop(vidR);
    ar2VideoClose(vidL);
    ar2VideoClose(vidR);
    argCleanup();
    exit(0);
}
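A sketch of the matching stereo setup, assuming ARToolKit's ar2Video API; the configuration strings are placeholders for real device configs:

static AR2VideoParamT *vidL, *vidR;

static void openStereoVideo(void)
{
    if ((vidL = ar2VideoOpen("-devNum=1")) == NULL) exit(-1);
    if ((vidR = ar2VideoOpen("-devNum=2")) == NULL) exit(-1);
    ar2VideoCapStart(vidL);   /* start capture on both cameras */
    ar2VideoCapStart(vidR);
}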
Example #6
/* cleanup function called when program exits */
static void cleanup(void)
{
	closesocket(s);
	closesocket(s2);
	WSACleanup();
	arVideoCapStop();
	arVideoClose();
	argCleanup();
}

Example #7
/* cleanup function called when program exits */
static void cleanup(void)
{
    arVideoCapStop();
    arVideoClose();
    argCleanup();

    closedir(dp);
}

Example #8
/* cleanup function called when program exits */
static void cleanup(void)
{
    //arVideoCapStop();
    arVideoClose();
    argCleanup();
    // delete is a no-op on a null pointer; reset it to avoid a dangling pointer.
    delete g_pRightHandShotDetector;
    g_pRightHandShotDetector = NULL;
    // Shut down MyKinect.
    g_MyKinect.Exit();
}
Example #9
///===================================================================================================================
/// End of program (Esc key)
///===================================================================================================================
static void cleanup()
{
    printf("Shutting down\n");
    arVideoCapStop();
    arVideoClose();
    argCleanup();
    /*FMOD_System_Close(systemSon);
    FMOD_System_Release(systemSon);*/
    exit(0);
}
Example #10
void CleanUp()
{
    nyar_NyARTransMat_O2_free(nyobj);
    mqoDeleteModel(WallModel);
    for (int i = 0; i < box_variey_num; i++) {
        mqoDeleteModel(Box[i]);
    }
    mqoCleanup();
    arVideoCapStop();
    arVideoClose();
    argCleanup();
}
Example #11
static void keyFunc( unsigned char key, int x, int y )
{
    if( key == 0x1b ) {
        argCleanup();
        exit(0);
    }

    if( key == ' ' ) {
        page++;
#if AR2_CAPABLE_ADAPTIVE_TEMPLATE
        page = page % (imageSet->num*(AR2_BLUR_IMAGE_MAX));
#else
        page = page % (imageSet->num);
#endif
        reportCurrentDPI();
        dispFunc();
    }
}
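keyFunc is a GLUT keyboard callback; a minimal sketch of how it would be wired up, assuming dispFunc is the display callback referenced above and the window setup is elided:

int main(int argc, char **argv)
{
    glutInit(&argc, argv);
    /* ... window and image-set setup ... */
    glutKeyboardFunc(keyFunc);   /* ESC quits, space advances the page */
    glutDisplayFunc(dispFunc);
    glutMainLoop();              /* does not return; the ESC path calls exit(0) */
}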
Example #12
/* cleanup function called when program exits */
static void cleanup(void)
{
    argCleanup();
}

Example #13
	static void cleanup() {
		arVideoCapStop();
		arVideoClose();
		argCleanup();
	}
Example #14
  /* 
   * One and only one callback, now takes cloud, does everything else needed. 
   */
  void ARPublisher::getTransformationCallback (const sensor_msgs::PointCloud2ConstPtr & msg)
  {
    sensor_msgs::ImagePtr image_msg(new sensor_msgs::Image);
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k, j;

    /* do we need to initialize? */
    if(!configured_)
    {
      if(msg->width == 0 || msg->height == 0)
      {
        ROS_ERROR ("Deformed cloud! Size = %d, %d.", msg->width, msg->height);
        return;
      }

      cam_param_.xsize = msg->width;
      cam_param_.ysize = msg->height;

      cam_param_.dist_factor[0] = msg->width/2;         // x0 = cX from openCV calibration
      cam_param_.dist_factor[1] = msg->height/2;        // y0 = cY from openCV calibration
      cam_param_.dist_factor[2] = 0;                    // f = -100*k1 from CV. Note, we had to do mm^2 to m^2, hence 10^8->10^2
      cam_param_.dist_factor[3] = 1.0;                  // scale factor, should probably be >1, but who cares...
      
      arInit ();
    }

    /* convert cloud to PCL */
    PointCloud cloud;
    pcl::fromROSMsg(*msg, cloud);
 
    /* get an OpenCV image from the cloud */
    pcl::toROSMsg (cloud, *image_msg);

    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR ("Could not convert from '%s' to 'bgr8'.", image_msg->encoding.c_str ());
      return;   // cv_ptr is not valid after a conversion failure
    }
    dataPtr = (ARUint8 *) cv_ptr->image.ptr();

    /* detect the markers in the video frame */
    if (arDetectMarkerLite (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      return;
    }
 
    arPoseMarkers_.markers.clear ();
    /* check for known patterns */
    for (i = 0; i < objectnum; i++)
    {
      k = -1;
      for (j = 0; j < marker_num; j++)
      {
        if (object[i].id == marker_info[j].id)
        {
          if (k == -1)
            k = j;
          else                  // make sure you have the best pattern (highest confidence factor)
          if (marker_info[k].cf < marker_info[j].cf)
            k = j;
        }
      }
      if (k == -1)
      {
        object[i].visible = 0;
        continue;
      }
      
      /* create a cloud for marker corners */
      int d = marker_info[k].dir;
      PointCloud marker;
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(4-d)%4][0], (int)marker_info[k].vertex[(4-d)%4][1] ) ); // upper left
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(5-d)%4][0], (int)marker_info[k].vertex[(5-d)%4][1] ) ); // upper right
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(6-d)%4][0], (int)marker_info[k].vertex[(6-d)%4][1] ) ); // lower right
      marker.push_back( cloud.at( (int)marker_info[k].vertex[(7-d)%4][0], (int)marker_info[k].vertex[(7-d)%4][1] ) ); // lower left

      /* create an ideal cloud */
      double w = object[i].marker_width;
      PointCloud ideal;
      ideal.push_back( makeRGBPoint(-w/2,w/2,0) );
      ideal.push_back( makeRGBPoint(w/2,w/2,0) );
      ideal.push_back( makeRGBPoint(w/2,-w/2,0) );
      ideal.push_back( makeRGBPoint(-w/2,-w/2,0) );

      /* get transformation */
      Eigen::Matrix4f t;
      TransformationEstimationSVD obj;
      obj.estimateRigidTransformation( marker, ideal, t );

      
      /* get final transformation */
      tf::Transform transform = tfFromEigen(t.inverse());
   
      // any(transform == nan)
      tf::Matrix3x3  m = transform.getBasis();
      tf::Vector3    p = transform.getOrigin();
      bool invalid = false;
      for(int i=0; i < 3; i++)
        for(int j=0; j < 3; j++)
          invalid = (invalid || isnan(m[i][j]) || fabs(m[i][j]) > 1.0);

      for(int i=0; i < 3; i++)
          invalid = (invalid || isnan(p[i]));
       

      if(invalid)
        continue; 

      /* publish the marker */
      ar_pose::ARMarker ar_pose_marker;
      ar_pose_marker.header.frame_id = msg->header.frame_id;
      ar_pose_marker.header.stamp = msg->header.stamp;
      ar_pose_marker.id = object[i].id;

      ar_pose_marker.pose.pose.position.x = transform.getOrigin().getX();
      ar_pose_marker.pose.pose.position.y = transform.getOrigin().getY();
      ar_pose_marker.pose.pose.position.z = transform.getOrigin().getZ();

      // Use the quaternion components directly; getAxis() returns the unit
      // rotation axis, which is not the quaternion's (x, y, z).
      ar_pose_marker.pose.pose.orientation.x = transform.getRotation().getX();
      ar_pose_marker.pose.pose.orientation.y = transform.getRotation().getY();
      ar_pose_marker.pose.pose.orientation.z = transform.getRotation().getZ();
      ar_pose_marker.pose.pose.orientation.w = transform.getRotation().getW();

      ar_pose_marker.confidence = marker_info[k].cf;   // confidence of the matched marker, not the first one
      arPoseMarkers_.markers.push_back (ar_pose_marker);

      /* publish transform */
      if (publishTf_)
      {
        broadcaster_.sendTransform(tf::StampedTransform(transform, msg->header.stamp, msg->header.frame_id, object[i].name));
      }

      /* publish visual marker */

      if (publishVisualMarkers_)
      {
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = transform * m; // marker pose in the camera frame

        tf::poseTFToMsg (markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = msg->header.frame_id;
        rvizMarker_.header.stamp = msg->header.stamp;
        rvizMarker_.id = object[i].id;

        rvizMarker_.scale.x = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.y = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.z = 0.5 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        switch (i)
        {
          case 0:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 1.0f;
            rvizMarker_.color.a = 1.0;
            break;
          case 1:
            rvizMarker_.color.r = 1.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
            break;
          default:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 1.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
        }
        rvizMarker_.lifetime = ros::Duration ();

        rvizMarkerPub_.publish (rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    arMarkerPub_.publish (arPoseMarkers_);
    ROS_DEBUG ("Published ar_multi markers");
  }
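The pose recovery in this callback is a four-point rigid registration: the observed corner cloud is aligned to an ideal, marker-frame square by least squares. A self-contained sketch of that step (the point type and marker width are assumptions; the callback presumably uses RGB-point aliases):

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/registration/transformation_estimation_svd.h>

typedef pcl::PointXYZ PointT;

int main()
{
  const float w = 0.08f;   // hypothetical marker width in metres
  pcl::PointCloud<PointT> observed, ideal;

  // Four corners measured in the camera frame (placeholder values:
  // the ideal square pushed 0.5 m along the optical axis).
  observed.push_back(PointT(-w/2,  w/2, 0.5f));
  observed.push_back(PointT( w/2,  w/2, 0.5f));
  observed.push_back(PointT( w/2, -w/2, 0.5f));
  observed.push_back(PointT(-w/2, -w/2, 0.5f));

  // The same corners in the marker's own frame, as in the callback above.
  ideal.push_back(PointT(-w/2,  w/2, 0));
  ideal.push_back(PointT( w/2,  w/2, 0));
  ideal.push_back(PointT( w/2, -w/2, 0));
  ideal.push_back(PointT(-w/2, -w/2, 0));

  // Least-squares rigid alignment of observed onto ideal via SVD.
  pcl::registration::TransformationEstimationSVD<PointT, PointT> est;
  Eigen::Matrix4f t;
  est.estimateRigidTransformation(observed, ideal, t);

  // t maps camera-frame points into the marker frame; the callback
  // publishes t.inverse(), the marker pose in the camera frame.
  return 0;
}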
Example #15
  void ARMultiPublisher::getTransformationCallback (const sensor_msgs::ImageConstPtr & image_msg)
  {
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int i, k, j;

    /* Get the image from the ROS topic.
     * NOTE: the dataPtr format is BGR because the ARToolKit library was
     * built with V4L; the dataPtr format changes according to the
     * ARToolKit configure options (see config.h). */
#if ROS_VERSION_MINIMUM(1, 9, 0)
    try
    {
      capture_ = cv_bridge::toCvCopy (image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;   // capture_ is not valid after a conversion failure
    }
    dataPtr = (ARUint8 *) ((IplImage) capture_->image).imageData;
#else
    try
    {
      capture_ = bridge_.imgMsgToCv (image_msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException & e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;   // capture_ is not valid after a conversion failure
    }
    dataPtr = (ARUint8 *) capture_->imageData;
#endif

    // detect the markers in the video frame
    if (arDetectMarker (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      ROS_BREAK ();
    }

    arPoseMarkers_.markers.clear ();
    // check for known patterns
    for (i = 0; i < objectnum; i++)
    {
      k = -1;
      for (j = 0; j < marker_num; j++)
      {
        if (object[i].id == marker_info[j].id)
        {
          if (k == -1)
            k = j;
          else                  // make sure you have the best pattern (highest confidence factor)
          if (marker_info[k].cf < marker_info[j].cf)
            k = j;
        }
      }
      if (k == -1)
      {
        object[i].visible = 0;
        continue;
      }

      // calculate the transform for each marker
      if (object[i].visible == 0)
      {
        arGetTransMat (&marker_info[k], object[i].marker_center, object[i].marker_width, object[i].trans);
      }
      else
      {
        arGetTransMatCont (&marker_info[k], object[i].trans,
                           object[i].marker_center, object[i].marker_width, object[i].trans);
      }
      object[i].visible = 1;

      double arQuat[4], arPos[3];

      //arUtilMatInv (object[i].trans, cam_trans);
      arUtilMat2QuatPos (object[i].trans, arQuat, arPos);

      // **** convert to ROS frame

      double quat[4], pos[3];

      pos[0] = arPos[0] * AR_TO_ROS;
      pos[1] = arPos[1] * AR_TO_ROS;
      pos[2] = arPos[2] * AR_TO_ROS;

      quat[0] = -arQuat[0];
      quat[1] = -arQuat[1];
      quat[2] = -arQuat[2];
      quat[3] = arQuat[3];

      ROS_DEBUG (" Object num %i ------------------------------------------------", i);
      ROS_DEBUG (" QUAT: Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
      ROS_DEBUG ("     Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

      // **** publish the marker

      ar_pose::ARMarker ar_pose_marker;
      ar_pose_marker.header.frame_id = image_msg->header.frame_id;
      ar_pose_marker.header.stamp = image_msg->header.stamp;
      ar_pose_marker.id = object[i].id;

      ar_pose_marker.pose.pose.position.x = pos[0];
      ar_pose_marker.pose.pose.position.y = pos[1];
      ar_pose_marker.pose.pose.position.z = pos[2];

      ar_pose_marker.pose.pose.orientation.x = quat[0];
      ar_pose_marker.pose.pose.orientation.y = quat[1];
      ar_pose_marker.pose.pose.orientation.z = quat[2];
      ar_pose_marker.pose.pose.orientation.w = quat[3];

      ar_pose_marker.confidence = round(marker_info[k].cf * 100);   // confidence of the matched marker, not the first one
      arPoseMarkers_.markers.push_back (ar_pose_marker);

      // **** publish transform between camera and marker

#if ROS_VERSION_MINIMUM(1, 9, 0)
      tf::Quaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      tf::Vector3 origin (pos[0], pos[1], pos[2]);
      tf::Transform t (rotation, origin);
#else
// DEPRECATED: Fuerte support ends when Hydro is released
      btQuaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      btVector3 origin (pos[0], pos[1], pos[2]);
      btTransform t (rotation, origin);
#endif

      if (publishTf_)
      {
        tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, object[i].name);
        broadcaster_.sendTransform(camToMarker);
      }

      // **** publish visual marker

      if (publishVisualMarkers_)
      {
#if ROS_VERSION_MINIMUM(1, 9, 0)
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = t * m; // marker pose in the camera frame
#else
// DEPRECATED: Fuerte support ends when Hydro is released
        btVector3 markerOrigin (0, 0, 0.25 * object[i].marker_width * AR_TO_ROS);
        btTransform m (btQuaternion::getIdentity (), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame
#endif

        tf::poseTFToMsg (markerPose, rvizMarker_.pose);

        rvizMarker_.header.frame_id = image_msg->header.frame_id;
        rvizMarker_.header.stamp = image_msg->header.stamp;
        rvizMarker_.id = object[i].id;

        rvizMarker_.scale.x = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.y = 1.0 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.scale.z = 0.5 * object[i].marker_width * AR_TO_ROS;
        rvizMarker_.ns = "basic_shapes";
        rvizMarker_.type = visualization_msgs::Marker::CUBE;
        rvizMarker_.action = visualization_msgs::Marker::ADD;
        switch (i)
        {
          case 0:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 1.0f;
            rvizMarker_.color.a = 1.0;
            break;
          case 1:
            rvizMarker_.color.r = 1.0f;
            rvizMarker_.color.g = 0.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
            break;
          default:
            rvizMarker_.color.r = 0.0f;
            rvizMarker_.color.g = 1.0f;
            rvizMarker_.color.b = 0.0f;
            rvizMarker_.color.a = 1.0;
        }
        rvizMarker_.lifetime = ros::Duration (1.0);

        rvizMarkerPub_.publish(rvizMarker_);
        ROS_DEBUG ("Published visual marker");
      }
    }
    arMarkerPub_.publish(arPoseMarkers_);
    ROS_DEBUG ("Published ar_multi markers");
  }
Example #16
int main(int argc, char **argv)
{
    ARUint8 *dataPtr;

    //
    // Camera configuration.
    //
#ifdef _WIN32
    char			*vconf = "Data\\WDM_camera_flipV.xml";
#else
    char			*vconf = "";
#endif

    int             count = 0;

   // char           *cparam_name    = "../calib_camera2/newparam.dat";

    char path[100];
    char cparam_name[100];
    char* cur_filename;
   
    if (argc < 2) {
        fprintf(stderr, "provide a jpeg file name\n");
        return -1;
    }
    

    int one = 1;
    //glutInit(&one,argv);


    cur_filename = argv[1];

    if (argc > 2) {
        sprintf(path, "%s/", argv[2]);
    } else {
        path[0] = '\0';
    }
    frame_ind = (argc > 3) ? atoi(argv[3]) : 0;   /* guard argv[3] before reading it */

    fprintf(stderr,"%d %s,\n", argc, cur_filename);

    /// make this get an image with curl
    dataPtr = loadImage(cur_filename, &xsize, &ysize);

    ARParam wparam;

    sprintf(cparam_name,"%s%s",path,"camera_para.dat");
    fprintf(stderr,"%s\n", cparam_name);

    /* set the initial camera parameters */
    if( arParamLoad(cparam_name, 1, &wparam) < 0 ) {
    //if( arParamLoad("camera_para.dat", 1, &wparam) < 0 ) {
        fprintf(stderr,"Camera parameter load error !!\n");
        exit(0);
    }
    arParamChangeSize( &wparam, xsize, ysize, &cparam );
    arInitCparam( &cparam );
    //fprintf(stderr,"*** Camera Parameter ***\n");
    //arParamDisp( &cparam );

    int patt_id;
    char buffer[100];
    
    sprintf(buffer,"%spatt.hiro",path);
    if( (patt_id=arLoadPatt(buffer)) < 0 ) {
        fprintf(stderr,"pattern load error !!\n");
        exit(0);
    }
   fprintf(stderr,"patt.hiro %d\n", patt_id);

    sprintf(buffer,"%spatt.sample1",path);
    if( (patt_id=arLoadPatt(buffer)) < 0 ) {
        fprintf(stderr,"pattern load error !!\n");
        exit(0);
    }
   fprintf(stderr,"patt.sample1 %d\n", patt_id);

    sprintf(buffer,"%spatt.sample2",path);
    if( (patt_id=arLoadPatt(buffer)) < 0 ) {
        fprintf(stderr,"pattern load error !!\n");
        exit(0);
    }
   fprintf(stderr,"patt.sample2 %d\n", patt_id);


    sprintf(buffer,"%spatt.kanji",path);
    if( (patt_id=arLoadPatt(buffer)) < 0 ) {
        fprintf(stderr,"pattern load error !!\n");
        exit(0);
    }
   fprintf(stderr,"patt.kanji %d\n", patt_id);


#if 0
	fprintf(stderr,"xysize %d %d\n\
cparam %g\t%g\t%g\t%g\n \
mat\n \
%g\t%g\t%g\t%g\n \
%g\t%g\t%g\t%g\n \
%g\t%g\t%g\t%g\n", 
		cparam.xsize, cparam.ysize,
		cparam.dist_factor[0], cparam.dist_factor[1], cparam.dist_factor[2], cparam.dist_factor[3], 
		cparam.mat[0][0], cparam.mat[0][1], cparam.mat[0][2], cparam.mat[0][3], 	
		cparam.mat[1][0], cparam.mat[1][1], cparam.mat[1][2], cparam.mat[1][3], 	
		cparam.mat[2][0], cparam.mat[2][1], cparam.mat[2][2], cparam.mat[2][3]	
		);
#endif

    /* open the graphics window */
    //argInit( &cparam, 1.0, 0, 0, 0, 0 );

    //printf("%s,\t\n",cur_filename);
    findMarkers(dataPtr);

    argCleanup();
    return 0;
}
Example #17
  void ARBundlePublisher::getTransformationCallback (const sensor_msgs::ImageConstPtr & image_msg)
  {
    ARUint8 *dataPtr;
    ARMarkerInfo *marker_info;
    int marker_num;
    int knownPatternCount, k, j;

    /* Get the image from the ROS topic.
     * NOTE: the dataPtr format is BGR because the ARToolKit library was
     * built with V4L; the dataPtr format changes according to the
     * ARToolKit configure options (see config.h). */
#if ROS_VERSION_MINIMUM(1, 9, 0)
    try
    {
      capture_ = cv_bridge::toCvCopy (image_msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;   // capture_ is not valid after a conversion failure
    }
    dataPtr = (ARUint8 *) ((IplImage) capture_->image).imageData;
#else
    try
    {
      capture_ = bridge_.imgMsgToCv (image_msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException & e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;   // capture_ is not valid after a conversion failure
    }
    dataPtr = (ARUint8 *) capture_->imageData;
#endif

    // detect the markers in the video frame
    if (arDetectMarker (dataPtr, threshold_, &marker_info, &marker_num) < 0)
    {
      argCleanup ();
      ROS_BREAK ();
    }

    arPoseMarkers_.markers.clear ();
    // check for known patterns
    for (knownPatternCount = 0; knownPatternCount < objectnum; knownPatternCount++)
    {
      k = -1;	// haven't seen my pattern yet
	  // marker_num is how many markers were actually found
      for (j = 0; j < marker_num; j++)
      {
        if (object[knownPatternCount].id == marker_info[j].id)
        {
          if (k == -1)	//if this is the first wild sighting
            k = j;		//which marker matches my pattern?
          else                  // make sure you have the best pattern (highest confidence factor)
          if (marker_info[k].cf < marker_info[j].cf)
            k = j;
        }
      }//end for (j)
      if (k == -1)	//didn't find my pattern :(
      {
        object[knownPatternCount].visible = 0;
        continue;	//ok. so this just skips all the way to the next knownPatternCount
      }

      object[knownPatternCount].visible = 1;	//woohoo mark as found

	  getMarkerTransform(knownPatternCount, marker_info, k);	//transform is stored in object[knownPatternCount].trans

      double arQuat[4], arPos[3];	//for the marker
      double masterARQuat[4], masterARPos[3];	//for the master/center of the box 

	  //find the transform for the pattern to the center of the box
	  //updates master_trans_
	  findTransformToCenter(object[knownPatternCount].trans, knownPatternCount);
      
	  //arUtilMatInv (object[i].trans, cam_trans);
      arUtilMat2QuatPos (object[knownPatternCount].trans, arQuat, arPos);
      arUtilMat2QuatPos (master_trans_, masterARQuat, masterARPos);

      // **** convert to ROS frame
      double quat[4], pos[3];
      double masterQuat[4], masterPos[3];
	  convertToRosFrame(arQuat, arPos, quat, pos);
	  convertToRosFrame(masterARQuat, masterARPos, masterQuat, masterPos);
	
	  //write pos and quat out to text file 
	  if (outputToFile)	{
		  //need to output video tick
		  // I know this is incredibly inefficient :(
		  //TODO: output tick as well, from the camera_raw msg


		//convert nanoseconds -> seconds
		snprintf(buffer, BUFFER_SIZE, "%10.10f", 
			image_msg->header.stamp.toNSec()*1e-9);
	  	output << buffer <<  ",";	
	  	output << masterPos[0] << ",";	
	  	output << masterPos[1] << ",";	
	  	output << masterPos[2] << ",";	
	  	output << masterQuat[0] << ",";	
	  	output << masterQuat[1] << ",";	
	  	output << masterQuat[2] << ",";	
	  	output << masterQuat[3] << "\n";	
	  }

	  ROS_DEBUG (" Object num %i------------------------------------------------", knownPatternCount);
      ROS_DEBUG (" QUAT: Pos x: %3.5f  y: %3.5f  z: %3.5f", pos[0], pos[1], pos[2]);
      ROS_DEBUG ("     Quat qx: %3.5f qy: %3.5f qz: %3.5f qw: %3.5f", quat[0], quat[1], quat[2], quat[3]);

      // **** prepare to publish the marker
	  stuffARMarkerMsg(knownPatternCount, pos, quat,image_msg->header, marker_info);		
 

      // **** publish transform between camera and marker

#if ROS_VERSION_MINIMUM(1, 9, 0)
      tf::Quaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      tf::Vector3 origin (pos[0], pos[1], pos[2]);
      tf::Transform t (rotation, origin);
#else
// DEPRECATED: Fuerte support ends when Hydro is released
      btQuaternion rotation (quat[0], quat[1], quat[2], quat[3]);
      btVector3 origin (pos[0], pos[1], pos[2]);
      btTransform t (rotation, origin);
#endif

	  tf::Quaternion masterRotation (masterQuat[0], masterQuat[1], masterQuat[2], masterQuat[3]);
      tf::Vector3 masterOrigin (masterPos[0], masterPos[1], masterPos[2]);
      tf::Transform masterTransform (masterRotation, masterOrigin);

      if (publishTf_)
      {
		//no need to publish tfs of markers
       // tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, object[knownPatternCount].name);
      //  broadcaster_.sendTransform(camToMarker);

		tf::StampedTransform camToMaster (masterTransform, image_msg->header.stamp, image_msg->header.frame_id, "master");
        broadcaster_.sendTransform(camToMaster);

      }

      // **** publish visual marker

      if (publishVisualMarkers_)
      {
#if ROS_VERSION_MINIMUM(1, 9, 0)
        tf::Vector3 markerOrigin (0, 0, 0.25 * object[knownPatternCount].marker_width * AR_TO_ROS);
        tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
        tf::Transform markerPose = t * m; // marker pose in the camera frame 
#else
// DEPRECATED: Fuerte support ends when Hydro is released
        btVector3 markerOrigin (0, 0, 0.25 * object[knownPatternCount].marker_width * AR_TO_ROS);
        btTransform m (btQuaternion::getIdentity (), markerOrigin);
        btTransform markerPose = t * m; // marker pose in the camera frame
#endif

		publishVisualMarker(knownPatternCount, markerPose, image_msg->header); 
	  }
  
	} // end of outer for loop
    arMarkerPub_.publish(arPoseMarkers_);
  
  }
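Example #15 inlines the same AR-to-ROS conversion that this example delegates to convertToRosFrame; a sketch consistent with that inline code (AR_TO_ROS is presumably the millimetre-to-metre scale factor):

void convertToRosFrame(const double arQuat[4], const double arPos[3],
                       double quat[4], double pos[3])
{
  // Scale ARToolKit's millimetre positions into ROS metres.
  pos[0] = arPos[0] * AR_TO_ROS;
  pos[1] = arPos[1] * AR_TO_ROS;
  pos[2] = arPos[2] * AR_TO_ROS;
  // Negating x, y, z conjugates the quaternion, inverting the rotation.
  quat[0] = -arQuat[0];
  quat[1] = -arQuat[1];
  quat[2] = -arQuat[2];
  quat[3] =  arQuat[3];
}

Example #18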
void ARMultiPublisher::getTransformationCallback(const sensor_msgs::ImageConstPtr & image_msg)
{
  // Get the image from the ROS topic.
  // NOTE: the dataPtr format is BGR because the ARToolKit library was
  // built with V4L; the dataPtr format changes according to the
  // ARToolKit configure options (see config.h).
  try
  {
    capture_ = bridge_.imgMsgToCv(image_msg, "bgr8");
  }
  catch (sensor_msgs::CvBridgeException & e)
  {
    ROS_ERROR ("Could not convert from >%s< to 'bgr8'.", image_msg->encoding.c_str ());
    return;   // capture_ is not valid after a conversion failure
  }
  // cvConvertImage(capture,capture,CV_CVTIMG_FLIP);
  ARUint8* data_ptr = (ARUint8 *)capture_->imageData;

  // detect the markers in the video frame
  if (arDetectMarker(data_ptr, threshold_, &marker_info_, &num_detected_marker_) < 0)
  {
    argCleanup();
    ROS_BREAK ();
  }
  ROS_DEBUG("Detected >%i< of >%i< markers.", num_detected_marker_, num_total_markers_);

  double error = 0.0;
  if ((error = arMultiGetTransMat(marker_info_, num_detected_marker_, multi_marker_config_)) < 0)
  {
    // ROS_ERROR("Could not get transformation. This should never happen.");
    ROS_WARN("Could not get transformation.");
    return;
  }
  ROS_DEBUG("Error is >%f<.", error);

  for (int i = 0; i < num_detected_marker_; i++)
  {
    ROS_DEBUG("multi_marker_config_->prevF: %i", multi_marker_config_->prevF);
    ROS_DEBUG("%s: (%i) pos: %f %f id: %i cf: %f", marker_frame_.c_str(), i, marker_info_[i].pos[0], marker_info_[i].pos[1], marker_info_[i].id, marker_info_[i].cf);
  }

  // choose those with the highest confidence
  std::vector<double> cfs(num_total_markers_, 0.0);
  marker_indizes_.clear();
  for (int i = 0; i < num_total_markers_; ++i)
  {
    marker_indizes_.push_back(-1);
  }
  for (int i = 0; i < num_total_markers_; ++i)
  {
    for (int j = 0; j < num_detected_marker_; j++)
    {
      if (!(marker_info_[j].id < 0))
      {
        if (marker_info_[j].cf > cfs[marker_info_[j].id])
        {
          cfs[marker_info_[j].id] = marker_info_[j].cf;
          marker_indizes_[marker_info_[j].id] = j;
        }
      }
    }
  }

  double ar_quat[4], ar_pos[3];
  arUtilMat2QuatPos(multi_marker_config_->trans, ar_quat, ar_pos);
  tf::Quaternion rotation(-ar_quat[0], -ar_quat[1], -ar_quat[2], ar_quat[3]);
  tf::Vector3 origin(ar_pos[0] * AR_TO_ROS, ar_pos[1] * AR_TO_ROS, ar_pos[2] * AR_TO_ROS);
  tf::Transform transform(rotation, origin);
  if (multi_marker_config_->prevF && publish_tf_)
  {
    if(error < error_threshold_)
    {
      ROS_DEBUG("%f %f %f | %f %f %f %f | %f", origin.getX(), origin.getY(), origin.getZ(), rotation.getX(), rotation.getY(), rotation.getZ(), rotation.getW(), image_msg->header.stamp.toSec());
      tf::StampedTransform cam_to_marker(transform, image_msg->header.stamp, camera_frame_, marker_frame_);
      tf_broadcaster_.sendTransform(cam_to_marker);
    }
    publishErrorMarker(error, image_msg->header.stamp);
  }

  if(publish_visual_markers_)
  {
    for (int i = 0; i < num_total_markers_; i++)
    {
      if (marker_indizes_[i] >= 0)
      {
          tf::Transform marker_transform;
          getTransform(i, marker_transform);
          tf::Transform marker = transform * marker_transform;
          publishMarker(i, marker, image_msg->header.stamp);
          last_transform_ = marker;
      }
      // else
      // {
      //     publishMarker(i, last_transform_, image_msg->header.stamp);
      // }
    }
  }
}
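The multi_marker_config_ consumed by arMultiGetTransMat above is normally loaded once at startup; a minimal sketch, assuming the classic arMulti API and a placeholder file name:

#include <AR/arMulti.h>

static ARMultiMarkerInfoT *multi_marker_config_;

static void loadMultiMarkerConfig(void)
{
    /* Each line of the config file names a pattern, its width, and its
     * transform relative to the bundle origin. */
    multi_marker_config_ = arMultiReadConfigFile("data/multi/marker.dat");
    if (multi_marker_config_ == NULL) {
        ROS_BREAK();   /* mirrors the error handling in the callback above */
    }
}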
Example #19
/* cleanup function called when program exits */
static void cleanup(void)
{
    knVideoCapStop();
    knVideoClose();
    argCleanup();
}

Example #20
//=======================================================
// Cleanup function
//=======================================================
void Cleanup(void)
{
	arVideoCapStop();	// stop video capture
	arVideoClose();		// close the video device
	argCleanup();		// shut down ARToolKit
}