Example #1
void videocallback(IplImage *image)
{
    static IplImage *rgba;
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }

    if (init) {
        init = false;
        cout<<"Loading calibration: "<<calibrationFilename.str();
        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
            cout<<" [Ok]"<<endl;
        } else {
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
        }
        double p[16];
        cam.GetOpenglProjectionMatrix(p,image->width,image->height);
        GlutViewer::SetGlProjectionMatrix(p);
        for (int i=0; i<32; i++) {
            d[i].SetScale(marker_size);
        }
        rgba = CvTestbed::Instance().CreateImageWithProto("RGBA", image, 0, 4);
    }
    static MarkerDetector<MarkerData> marker_detector;
    marker_detector.SetMarkerSize(marker_size); // for marker ids larger than 255, set the content resolution accordingly
    //static MarkerDetector<MarkerArtoolkit> marker_detector;
    //marker_detector.SetMarkerSize(2.8, 3, 1.5);

    // Here we could use RGBA as well, just to verify that it also works...
    //cvCvtColor(image, rgba, CV_RGB2RGBA);
    marker_detector.Detect(image, &cam, true, true);
    GlutViewer::DrawableClear();
    for (size_t i=0; i<marker_detector.markers->size(); i++) {
        if (i >= 32) break;
        
        Pose p = (*(marker_detector.markers))[i].pose;
        p.GetMatrixGL(d[i].gl_mat);

        int id = (*(marker_detector.markers))[i].GetId();
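        // Derive a distinct, repeatable RGB color from the marker id.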
        double r = 1.0 - double(id+1)/32.0;
        double g = 1.0 - double(id*3%32+1)/32.0;
        double b = 1.0 - double(id*7%32+1)/32.0;
        d[i].SetColor(r, g, b);

        GlutViewer::DrawableAdd(&(d[i]));
    }

    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}
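A minimal sketch of how such a callback is typically hooked up in the ALVAR samples (a sketch under assumptions: CvTestbed and SetVideoCallback come from ALVAR's sample framework, and the capture-device setup is elided):

int main(int argc, char *argv[])
{
    // Register the frame callback above; each captured frame is passed to it.
    CvTestbed::Instance().SetVideoCallback(videocallback);
    // ... open a capture device via an ALVAR Capture plugin and start the video loop ...
    return 0;
}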
Example #2
// Updates the bundlePoses of the multi_marker_bundles by detecting markers and
// using all markers in a bundle to infer the master tag's position
void GetMultiMarkerPoses(IplImage *image) {

  if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)){
    for(int i=0; i<n_bundles; i++)
      multi_marker_bundles[i]->Update(marker_detector.markers, cam, bundlePoses[i]);
    
    if(marker_detector.DetectAdditional(image, cam, false) > 0){
      for(int i=0; i<n_bundles; i++){
        if (multi_marker_bundles[i]->SetTrackMarkers(marker_detector, cam, bundlePoses[i], image) > 0)
          multi_marker_bundles[i]->Update(marker_detector.markers, cam, bundlePoses[i]);
      }
    }
  }
}
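A possible per-frame caller for this function, assuming an image callback like the others on this page (the printout is purely illustrative):

void imageCallback(IplImage *image)
{
    // Update all bundle poses for this frame, then report the estimated
    // distance (z-translation) of each bundle's master tag.
    GetMultiMarkerPoses(image);
    for (int i = 0; i < n_bundles; i++)
        std::cout << "bundle " << i << " z = " << bundlePoses[i].translation[2] << std::endl;
}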
Example #3
void GetMarkerPoses(IplImage *image, ARCloud &cloud) {

  //Detect and track the markers
  if (marker_detector.Detect(image, cam, true, false, max_new_marker_error,
			     max_track_error, CVSEQ, true)) 
    {
      printf("\n--------------------------\n\n");
      for (size_t i=0; i<marker_detector.markers->size(); i++)
     	{
	  vector<cv::Point, Eigen::aligned_allocator<cv::Point> > pixels;
	  Marker *m = &((*marker_detector.markers)[i]);
	  int id = m->GetId();
	  cout << "******* ID: " << id << endl;

	  int resol = m->GetRes();
	  int ori = m->ros_orientation;
      
	  PointDouble pt1, pt2, pt3, pt4;
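	  // ros_marker_points_img stores the resol*resol inner points row-major,
	  // so the four index expressions below pick out the grid's corner points.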
	  pt4 = m->ros_marker_points_img[0];
	  pt3 = m->ros_marker_points_img[resol-1];
	  pt1 = m->ros_marker_points_img[(resol*resol)-resol];
	  pt2 = m->ros_marker_points_img[(resol*resol)-1];
	  
	  m->ros_corners_3D[0] = cloud(pt1.x, pt1.y);
	  m->ros_corners_3D[1] = cloud(pt2.x, pt2.y);
	  m->ros_corners_3D[2] = cloud(pt3.x, pt3.y);
	  m->ros_corners_3D[3] = cloud(pt4.x, pt4.y);
	  
	  if(ori >= 0 && ori < 4){
	    if(ori != 0){
	      std::rotate(m->ros_corners_3D.begin(), m->ros_corners_3D.begin() + ori, m->ros_corners_3D.end());
	    }
	  }
	  else
	    ROS_ERROR("FindMarkerBundles: Bad Orientation: %i for ID: %i", ori, id);

	  //Get the 3D marker points
	  BOOST_FOREACH (const PointDouble& p, m->ros_marker_points_img)
	    pixels.push_back(cv::Point(p.x, p.y));	  
	  ARCloud::Ptr selected_points = ata::filterCloud(cloud, pixels);

	  //Use the kinect data to find a plane and pose for the marker
	  int ret = PlaneFitPoseImprovement(i, m->ros_corners_3D, selected_points, cloud, m->pose);	
	}
    }
}
Example #4
int main(int, char **)
{
  VideoCapture cap(0);
  static MarkerDetector<MarkerData> marker_detector;
  marker_detector.SetMarkerSize(15); // for marker ids larger than 255, set the content resolution accordingly

      
  if (cap.isOpened())
  {
    Mat frame;
    namedWindow("portal");
    while(true)
    {
      cap >> frame;

      IplImage image = frame;
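      // Note: this implicit cv::Mat -> IplImage conversion exists only in old
      // OpenCV (1.x/2.x APIs); with OpenCV 4 you would use cvIplImage(frame).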
      marker_detector.Detect(&image, &cam, true, true);
      if (marker_detector.markers->size() > 0)
        cout << "Detected " << marker_detector.markers->size() << endl;
      
      for (size_t i=0; i<marker_detector.markers->size(); i++)
      {
        if (i >= 32) break;
        
        Pose p = (*(marker_detector.markers))[i].pose;

        vector<PointDouble> corners = (*(marker_detector.markers))[i].marker_corners_img;

        //cout << "corners size(4) == " << corners.size() << endl;
        /*
        line(frame, bestSquares[i][0], bestSquares[i][1], Scalar(0,255,0), 2);
        line(frame, bestSquares[i][1], bestSquares[i][2], Scalar(0,255,0), 2);
        line(frame, bestSquares[i][2], bestSquares[i][3], Scalar(0,255,0), 2);
        line(frame, bestSquares[i][3], bestSquares[i][0], Scalar(0,255,0), 2);*/
      }
      
      imshow("portal", frame);
      
      
      if(waitKey(30) >= 0) break;
    }
  }
  return 0;
}
Example #5
// Updates the bundlePoses of the multi_marker_bundles by detecting markers and
// using all markers in a bundle to infer the master tag's position
void GetMultiMarkerPoses(IplImage *image, ARCloud &cloud) {

  for(int i=0; i<n_bundles; i++){
    master_visible[i] = false;
    bundles_seen[i] = 0;
  }
  
  //Detect and track the markers
  if (marker_detector.Detect(image, cam, true, false, max_new_marker_error,
			     max_track_error, CVSEQ, true)) 
    {
      //printf("\n--------------------------\n\n");
      for (size_t i=0; i<marker_detector.markers->size(); i++)
    	{
	  vector<cv::Point, Eigen::aligned_allocator<cv::Point> > pixels;
	  Marker *m = &((*marker_detector.markers)[i]);
	  int id = m->GetId();
	  //cout << "******* ID: " << id << endl;
      
	  //Get the 3D points of the outer corners
          /*
	  PointDouble corner0 = m->marker_corners_img[0];
	  PointDouble corner1 = m->marker_corners_img[1];
	  PointDouble corner2 = m->marker_corners_img[2];
	  PointDouble corner3 = m->marker_corners_img[3];
	  m->ros_corners_3D[0] = cloud(corner0.x, corner0.y);
	  m->ros_corners_3D[1] = cloud(corner1.x, corner1.y);
	  m->ros_corners_3D[2] = cloud(corner2.x, corner2.y);
	  m->ros_corners_3D[3] = cloud(corner3.x, corner3.y);
	  */
          
	  //Get the 3D inner corner points - more stable than outer corners that can "fall off" the object
	  int resol = m->GetRes();
	  int ori = m->ros_orientation;
      
	  PointDouble pt1, pt2, pt3, pt4;
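	  // ros_marker_points_img stores the resol*resol inner points row-major,
	  // so the four index expressions below pick out the grid's corner points.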
	  pt4 = m->ros_marker_points_img[0];
	  pt3 = m->ros_marker_points_img[resol-1];
	  pt1 = m->ros_marker_points_img[(resol*resol)-resol];
	  pt2 = m->ros_marker_points_img[(resol*resol)-1];
	  
	  m->ros_corners_3D[0] = cloud(pt1.x, pt1.y);
	  m->ros_corners_3D[1] = cloud(pt2.x, pt2.y);
	  m->ros_corners_3D[2] = cloud(pt3.x, pt3.y);
	  m->ros_corners_3D[3] = cloud(pt4.x, pt4.y);
	  
	  if(ori >= 0 && ori < 4){
	    if(ori != 0){
	      std::rotate(m->ros_corners_3D.begin(), m->ros_corners_3D.begin() + ori, m->ros_corners_3D.end());
	    }
	  }
	  else
	    ROS_ERROR("FindMarkerBundles: Bad Orientation: %i for ID: %i", ori, id);

	  //Check if we have spotted a master tag
	  int master_ind = -1;
	  for(int j=0; j<n_bundles; j++){
	    if(id == master_id[j]){
	      master_visible[j] = true;
	      master_ind = j;
	    }
	  }

	  //Mark the bundle that this marker belongs to as "seen"
	  int bundle_ind = -1;
	  for(int j=0; j<n_bundles; j++){
	    for(size_t k=0; k<bundle_indices[j].size(); k++){
	      if(bundle_indices[j][k] == id){
	        bundle_ind = j;
	        bundles_seen[j] += 1;
	        break;
	      }
	    }
	  }

	  //Get the 3D marker points
	  BOOST_FOREACH (const PointDouble& p, m->ros_marker_points_img)
	    pixels.push_back(cv::Point(p.x, p.y));	  
	  ARCloud::Ptr selected_points = ata::filterCloud(cloud, pixels);

	  //Use the kinect data to find a plane and pose for the marker
	  int ret = PlaneFitPoseImprovement(i, m->ros_corners_3D, selected_points, cloud, m->pose);
            
	  //If the plane fit fails...
	  if(ret < 0){
	    //Mark this tag as invalid
	    m->valid = false;
	    //If this was a master tag, reset its visibility
	    if(master_ind >= 0)
	      master_visible[master_ind] = false;
	    //Decrement the number of markers seen in this bundle (if it belongs to one)
	    if(bundle_ind >= 0)
	      bundles_seen[bundle_ind] -= 1;
	  }
	  else
	    m->valid = true;
	}	

      //For each master tag, infer the 3D position of its corners from other visible tags
      //Then, do a plane fit to those new corners   	
      ARCloud inferred_corners;
      for(int i=0; i<n_bundles; i++){
        if(bundles_seen[i] > 0){
            //if(master_visible[i] == false){
                if(InferCorners(cloud, *(multi_marker_bundles[i]), inferred_corners) >= 0){
                    ARCloud::Ptr inferred_cloud(new ARCloud(inferred_corners));
                    PlaneFitPoseImprovement(i+5000, inferred_corners, inferred_cloud, cloud, bundlePoses[i]);
                }
            //}
            //If master is visible, use it directly instead of inferring pose
            //else{
            //    for (size_t j=0; j<marker_detector.markers->size(); j++){
            //        Marker *m = &((*marker_detector.markers)[j]);                     
            //        if(m->GetId() == master_id[i])
            //            bundlePoses[i] = m->pose;
            //    } 
            //}
            Pose ret_pose;
            if(med_filt_size > 0){
                med_filts[i]->addPose(bundlePoses[i]);
                med_filts[i]->getMedian(ret_pose);
                bundlePoses[i] = ret_pose;
            }   
        }		
    }
  }
}
Example #6
void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
{
	if(init){
		CvSize sz_ = cvSize(cam->x_res, cam->y_res);
		capture_ = cvCreateImage(sz_, IPL_DEPTH_8U, 4);
		init = false;
	}

	//If we've already gotten the cam info, then go ahead
	if(cam->getCamInfo_){
		try{
			tf::StampedTransform CamToOutput;
			try{
				tf_listener->waitForTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, ros::Duration(1.0));
				tf_listener->lookupTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, CamToOutput);
			}
			catch (tf::TransformException ex){
				ROS_ERROR("%s",ex.what());
			}

			capture_ = bridge_.imgMsgToCv (image_msg, "rgb8");
			marker_detector.Detect(capture_, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true);

			arPoseMarkers_.markers.clear ();
			for (size_t i=0; i<marker_detector.markers->size(); i++) 
			{
				//Get the pose relative to the camera
				int id = (*(marker_detector.markers))[i].GetId();
				Pose p = (*(marker_detector.markers))[i].pose;
				double px = p.translation[0]/100.0;
				double py = p.translation[1]/100.0;
				double pz = p.translation[2]/100.0;
				double qx = p.quaternion[1];
				double qy = p.quaternion[2];
				double qz = p.quaternion[3];
				double qw = p.quaternion[0];

				tf::Quaternion rotation (qx,qy,qz,qw);
				tf::Vector3 origin (px,py,pz);
				tf::Transform t (rotation, origin);
				tf::Vector3 markerOrigin (0, 0, 0);
				tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
				tf::Transform markerPose = t * m; // marker pose in the camera frame

				//Publish the transform from the camera to the marker		
				std::string markerFrame = "ar_marker_";
				std::stringstream out;
				out << id;
				std::string id_string = out.str();
				markerFrame += id_string;
				tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame.c_str());
				tf_broadcaster->sendTransform(camToMarker);
				
				//Create the rviz visualization messages
				tf::poseTFToMsg (markerPose, rvizMarker_.pose);
				rvizMarker_.header.frame_id = image_msg->header.frame_id;
				rvizMarker_.header.stamp = image_msg->header.stamp;
				rvizMarker_.id = id;

				rvizMarker_.scale.x = 1.0 * marker_size/100.0;
				rvizMarker_.scale.y = 1.0 * marker_size/100.0;
				rvizMarker_.scale.z = 0.2 * marker_size/100.0;
				rvizMarker_.ns = "basic_shapes";
				rvizMarker_.type = visualization_msgs::Marker::CUBE;
				rvizMarker_.action = visualization_msgs::Marker::ADD;
				switch (id)
				{
				  case 0:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 1.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 1:
				    rvizMarker_.color.r = 1.0f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 0.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 2:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 1.0f;
				    rvizMarker_.color.b = 0.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 3:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 0.5f;
				    rvizMarker_.color.b = 0.5f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 4:
				    rvizMarker_.color.r = 0.5f;
				    rvizMarker_.color.g = 0.5f;
				    rvizMarker_.color.b = 0.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  default:
				    rvizMarker_.color.r = 0.5f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 0.5f;
				    rvizMarker_.color.a = 1.0;
				    break;
				}
				rvizMarker_.lifetime = ros::Duration (1.0);
				rvizMarkerPub_.publish (rvizMarker_);

				//Get the pose of the tag in the camera frame, then the output frame (usually torso)				
				tf::Transform tagPoseOutput = CamToOutput * markerPose;

				//Create the pose marker messages
				ar_track_alvar::AlvarMarker ar_pose_marker;
				tf::poseTFToMsg (tagPoseOutput, ar_pose_marker.pose.pose);
				ar_pose_marker.header.frame_id = output_frame;
				ar_pose_marker.header.stamp = image_msg->header.stamp;
				ar_pose_marker.id = id;
				arPoseMarkers_.markers.push_back (ar_pose_marker);
			}
			arMarkerPub_.publish (arPoseMarkers_);
		}
		catch (sensor_msgs::CvBridgeException & e){
			ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ());
		}
	}
}
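For reference, this callback would typically be wired to an image topic with standard image_transport usage (the topic name here is an assumption):

// Hypothetical subscription setup inside main(), after ros::init():
ros::NodeHandle nh;
image_transport::ImageTransport it(nh);
image_transport::Subscriber sub = it.subscribe("camera/image_raw", 1, &getCapCallback);
ros::spin();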
Example #7
double GetMultiMarkerPose(IplImage *image, Pose &pose) {
    static bool init=true;

    if (init) {
        init=false;
        vector<int> id_vector;
        for(int i = 0; i < nof_markers; ++i)
            id_vector.push_back(i);
        // We initialize MultiMarkerBundle using MultiMarkerInitializer.
        // Each marker needs to be visible in at least two images; at most 64 images are used.
        multi_marker_init = new MultiMarkerInitializer(id_vector, 2, 64);
        pose.Reset();
        multi_marker_init->PointCloudAdd(id_vector[0], marker_size, pose);
        multi_marker_bundle = new MultiMarkerBundle(id_vector);
        marker_detector.SetMarkerSize(marker_size);
    }

    double error = -1;
    if (!optimize_done) {
        if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)) {
            error = multi_marker_init->Update(marker_detector.markers, cam, pose);
        }
    }
    else {
        if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)) {
            error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
            if ((multi_marker_bundle->SetTrackMarkers(marker_detector, cam, pose, image) > 0) && (marker_detector.DetectAdditional(image, cam, false) > 0))
            {
                error = multi_marker_bundle->Update(marker_detector.markers, cam, pose);
            }
        }
    }

    if (add_measurement) {
        cout << "Markers seen: " << marker_detector.markers->size() << "\n";
        if (marker_detector.markers->size() >= 2) {
            cout<<"Adding measurement..."<<endl;
            multi_marker_init->MeasurementsAdd(marker_detector.markers);
        }
        else {
            cout << "Not enough markers to capture a measurement\n";
        }
        add_measurement=false;
    }

    if (optimize) {
        cout<<"Initializing..."<<endl;
        if (!multi_marker_init->Initialize(cam)) {
            cout<<"Initialization failed, add some more measurements."<<endl;

        } else {
            // Reset the bundle adjuster.
            multi_marker_bundle->Reset();
            multi_marker_bundle->MeasurementsReset();
            // Copy all measurements into the bundle adjuster.
            for (int i = 0; i < multi_marker_init->getMeasurementCount(); ++i) {
                Pose p2;
                multi_marker_init->getMeasurementPose(i, cam, p2);
                const std::vector<MultiMarkerInitializer::MarkerMeasurement, Eigen::aligned_allocator<MultiMarkerInitializer::MarkerMeasurement> > markers = multi_marker_init->getMeasurementMarkers(i);
                multi_marker_bundle->MeasurementsAdd(&markers, p2);
            }
            // Initialize the bundle adjuster with initial marker poses.
            multi_marker_bundle->PointCloudCopy(multi_marker_init);
            cout<<"Optimizing..."<<endl;
            if (multi_marker_bundle->Optimize(cam, 0.01, 20)) {
                cout<<"Optimizing done"<<endl;
                optimize_done=true;

            } else {
                cout<<"Optimizing FAILED!"<<endl;
            }
        }
        optimize=false;
    }
    return error;
}
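The add_measurement and optimize flags used above are toggled from user input; a hypothetical key handler (the actual sample routes keys through its own keyboard callback) might look like:

void keycallback(int key)
{
    // Hypothetical bindings: space captures a measurement, 'o' runs optimization.
    if (key == ' ') add_measurement = true;
    else if (key == 'o') optimize = true;
}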
Example #8
void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
{
	//If we've already gotten the cam info, then go ahead
	if(cam->getCamInfo_){
		try{
			tf::StampedTransform CamToOutput;
			try{
				tf_listener->waitForTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, ros::Duration(1.0));
				tf_listener->lookupTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, CamToOutput);
			}
			catch (tf::TransformException ex){
				ROS_ERROR("%s",ex.what());
			}


			//Convert the image
			cv_ptr_ = cv_bridge::toCvCopy(image_msg, sensor_msgs::image_encodings::BGR8);

			//Get the estimated pose of the main markers by using all the markers in each bundle

			// GetMultiMarkerPoses expects an IplImage*, but as of ROS Groovy, cv_bridge gives
			// us a cv::Mat. I'm too lazy to change to cv::Mat throughout right now, so I
			// do this conversion here. -jbinney
			IplImage ipl_image = cv_ptr_->image;

			marker_detector.Detect(&ipl_image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true);

			arPoseMarkers_.markers.clear ();
			for (size_t i=0; i<marker_detector.markers->size(); i++) 
			{
				//Get the pose relative to the camera
				int id = (*(marker_detector.markers))[i].GetId();
				Pose p = (*(marker_detector.markers))[i].pose;
				double px = p.translation[0]/100.0;
				double py = p.translation[1]/100.0;
				double pz = p.translation[2]/100.0;
				double qx = p.quaternion[1];
				double qy = p.quaternion[2];
				double qz = p.quaternion[3];
				double qw = p.quaternion[0];

				tf::Quaternion rotation (qx,qy,qz,qw);
				tf::Vector3 origin (px,py,pz);
				tf::Transform t (rotation, origin);
				tf::Vector3 markerOrigin (0, 0, 0);
				tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
				tf::Transform markerPose = t * m; // marker pose in the camera frame

				//Publish the transform from the camera to the marker		
				std::string markerFrame = "ar_marker_";
				std::stringstream out;
				out << id;
				std::string id_string = out.str();
				markerFrame += id_string;
				tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame.c_str());
				tf_broadcaster->sendTransform(camToMarker);
				
				//Create the rviz visualization messages
				tf::poseTFToMsg (markerPose, rvizMarker_.pose);
				rvizMarker_.header.frame_id = image_msg->header.frame_id;
				rvizMarker_.header.stamp = image_msg->header.stamp;
				rvizMarker_.id = id;

				rvizMarker_.scale.x = 1.0 * marker_size/100.0;
				rvizMarker_.scale.y = 1.0 * marker_size/100.0;
				rvizMarker_.scale.z = 0.2 * marker_size/100.0;
				rvizMarker_.ns = "basic_shapes";
				rvizMarker_.type = visualization_msgs::Marker::CUBE;
				rvizMarker_.action = visualization_msgs::Marker::ADD;
				switch (id)
				{
				  case 0:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 1.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 1:
				    rvizMarker_.color.r = 1.0f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 0.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 2:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 1.0f;
				    rvizMarker_.color.b = 0.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 3:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 0.5f;
				    rvizMarker_.color.b = 0.5f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 4:
				    rvizMarker_.color.r = 0.5f;
				    rvizMarker_.color.g = 0.5f;
				    rvizMarker_.color.b = 0.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  default:
				    rvizMarker_.color.r = 0.5f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 0.5f;
				    rvizMarker_.color.a = 1.0;
				    break;
				}
				rvizMarker_.lifetime = ros::Duration (1.0);
				rvizMarkerPub_.publish (rvizMarker_);

				//Get the pose of the tag in the camera frame, then the output frame (usually torso)				
				tf::Transform tagPoseOutput = CamToOutput * markerPose;

				//Create the pose marker messages
				ar_track_alvar_msgs::AlvarMarker ar_pose_marker;
				tf::poseTFToMsg (tagPoseOutput, ar_pose_marker.pose.pose);
				ar_pose_marker.header.frame_id = output_frame;
				ar_pose_marker.header.stamp = image_msg->header.stamp;
				ar_pose_marker.id = id;
				arPoseMarkers_.markers.push_back (ar_pose_marker);
			}
			arMarkerPub_.publish (arPoseMarkers_);
		}
		catch (cv_bridge::Exception& e){
			ROS_ERROR ("Could not convert from '%s' to 'bgr8'.", image_msg->encoding.c_str ());
		}
	}
}
Example #9
void videocallback(IplImage *image)
{
    static Camera cam;
    Pose pose;
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }

    static bool init = true;
    if (init)
    {

        init = false;

        // Initialize camera
        cout<<"Loading calibration: "<<calibrationFilename.str();

        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height))
        {
            cout<<" [Ok]"<<endl;
        }
        else
        {
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
        }

        vector<int> id_vector;
        for(int i = 0; i < nof_markers; ++i)
            id_vector.push_back(i);

        // We initialize the MultiMarker using a fixed marker field (which can be printed from ALVAR.pdf)
        marker_detector.SetMarkerSize(marker_size);
        marker_detector.SetMarkerSizeForId(0, marker_size*2);
        multi_marker = new MultiMarker(id_vector);
        pose.Reset();
        multi_marker->PointCloudAdd(0, marker_size*2, pose);
        pose.SetTranslation(-marker_size*2.5, +marker_size*1.5, 0);
        multi_marker->PointCloudAdd(1, marker_size, pose);
        pose.SetTranslation(+marker_size*2.5, +marker_size*1.5, 0);
        multi_marker->PointCloudAdd(2, marker_size, pose);
        pose.SetTranslation(-marker_size*2.5, -marker_size*1.5, 0);
        multi_marker->PointCloudAdd(3, marker_size, pose);
        pose.SetTranslation(+marker_size*2.5, -marker_size*1.5, 0);
        multi_marker->PointCloudAdd(4, marker_size, pose);
    }

    double error=-1;
    if (marker_detector.Detect(image, &cam, true, (visualize == 1), 0.0)) {
        if (detect_additional) {
            error = multi_marker->Update(marker_detector.markers, &cam, pose);
            multi_marker->SetTrackMarkers(marker_detector, &cam, pose, visualize ? image : NULL);
            marker_detector.DetectAdditional(image, &cam, (visualize == 1));
        }
        if (visualize == 2) 
            error = multi_marker->Update(marker_detector.markers, &cam, pose, image);
        else
            error = multi_marker->Update(marker_detector.markers, &cam, pose);
    }

    static Marker foo;
    foo.SetMarkerSize(marker_size*4);
    if ((error >= 0) && (error < 5))
    {
        foo.pose = pose;
    }
    foo.Visualize(image, &cam);

    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}