// The image callback function for the master (downward-looking) camera.
    void imageCallback(const sensor_msgs::ImageConstPtr& img_msg) {
        if (isPTAMshouldstop) {
            cout << "PTAM SHOULD STOP: OUTLIERS DETECTED OR ALREADY LANDING!" << endl;
//            return;
        }

        // TODO: synchronise the dual images; a simple approach is to assume their
        // stamps are close to each other. The main processing should only start
        // once both images have arrived.

        ros::Time msg_stamp = img_msg->header.stamp;
        frame_stamp_ = img_msg->header.stamp.toSec();
//        ROS_INFO("MASTER IMG TIMESTAMP: %f", frame_stamp_);

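        // During circle-based initialisation, track on the stored initialisation
        // image (and its stamp) rather than on the live frame; both that image
        // and the Vicon pose must have arrived first.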
        if (ini_one_circle_ && !isFinishIniPTAMwithcircle)
        {
            if ( !isimage_ini_call || !isviconcall) return;
            else{
                convertImage(image_ini, 1);
                msg_stamp = image_ini->header.stamp;

                CVD::img_save(frameBW_, "inimage.jpg");
            }
        }
        else
            convertImage(img_msg, 1);

        // if PTAM is initialised, the forward-looking image is now usable
        cursecimg_good = false;
        if (isdualcam && secondimgcame) // && isFinishIniPTAMwithcircle
        {
            double imginterval = fabs(img_msg->header.stamp.toSec()-img_secmsg->header.stamp.toSec());
            if ( imginterval < intervalmax_imgdual)
            {
                secondcamlock.lock();
                convertImage(img_secmsg, 2);
                secondcamlock.unlock();
                cursecimg_good = true; // only when the image is synchronised and PTAM is already initialised
            }
        }

        // pass image frame along to PTAM
        if (use_circle_ini_ && !isFinishIniPTAMwithcircle){
            // use the initial pose and image from another node: the simple_landing node
            if ((ini_one_circle_ || ini_two_circle_) && !tracker_->circle_pose_get ){
                tracker_->circle_pose_get = true;
                tracker_->se3CfromW = tfs2se3(vicon_state);// just need Twc
                tracker_->time_stamp_circle = vicon_state.header.stamp.toSec();
                tracker_->time_stamp_now = ros::Time::now().toSec();
            }

            // use ground information and IMU/EKF info.
            else if (ini_ground_ && !isEKFattitudeInicall && useEKFiniAtti){
                cout << "Waiting for attitude from EKF node..." << endl;
                return;
            }
            else if (ini_ground_ && !isattitudecall && !useEKFiniAtti){
                cout << "Waiting for attitude from IMU..." << endl;
                return;
            }
            else if (ini_ground_ && useEKFiniAtti && isEKFattitudeInicall ){
                if (!tracker_->attitude_get){

                    tracker_->se3IMU_camfromW = IMU2camWorldfromQuat(iniAttiEKF); // Tcw from Twi, the attitude estimated by the IMU/EKF

                    cout << "PTAM attitude got from EKF: "<< iniAttiEKF.w() <<", "
                                                << iniAttiEKF.x() <<", "
                                                << iniAttiEKF.y() <<", "
                                                << iniAttiEKF.z() <<endl;

                    tracker_->attitude_get = true;
                }
            }
            else if (ini_ground_ && !useEKFiniAtti && isattitudecall){
                if (!tracker_->attitude_get){
                    mAttitudelock.lock();
                    tracker_->se3IMU_camfromW = imu2camWorld(attitude_data_); // Tcw from the IMU-estimated attitude
                    mAttitudelock.unlock();

                    cout << "PTAM pose got from IMU: "<< attitude_data_.roll <<", "
                            << attitude_data_.pitch<<endl;

                    tracker_->attitude_get = true;
                }
            }
        }

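        // Feed the EKF-predicted camera pose and its scale to the tracker as a motion prior.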
        if (use_ekf_pose && ispose_predicted_ekfcall) {
            mPosePredictedlock.lock();
            tracker_->se3CfromW_predicted_ekf.get_translation()[0] = pose_predicted_ekf_state_.pose.pose.position.x;
            tracker_->se3CfromW_predicted_ekf.get_translation()[1] = pose_predicted_ekf_state_.pose.pose.position.y;
            tracker_->se3CfromW_predicted_ekf.get_translation()[2] = pose_predicted_ekf_state_.pose.pose.position.z;
            tracker_->se3CfromW_predicted_ekf.get_rotation() = orientation_ekf_;
            tracker_->scale_ekf = scale_ekf_;// This is the scale of the pose estimation in the previous image frame
            mPosePredictedlock.unlock();
        }

        ros::Time time1 = ros::Time::now();


        // for debugging
//        static int published_num = 0;
//        if (published_num == 2)
//            return;

        // The tracker accepts either a single image or both images of the dual-camera setup.
        if (!cursecimg_good)
            tracker_->TrackFrame(frameBW_);
        else
            tracker_->TrackFrame(frameBW_, frameBW_sec);

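        // Cache the tracked pose: on the first successfully tracked frame initialise
        // the last, current and published poses; afterwards only update the current one.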
        if (!isFinishIniPTAMwithcircle){
            camPoselast = tracker_->GetCurrentPose();
            camPosethis = camPoselast;
            camPose4pub = camPoselast;
        }
        else{
            camPosethis = tracker_->GetCurrentPose();
            camPose4pub = camPosethis;
        }
        isFinishIniPTAMwithcircle = true;

        ros::Time time2 = ros::Time::now();
        ros::Duration time3 = time2 - time1;
//        ROS_INFO("PTAM time cost: %f", time3.toSec());

        if (use_circle_ini_)
            cout << tracker_->GetMessageForUser() << " " << endl;
        else
            cout << tracker_->GetMessageForUser() << endl;

        // Ignore frames that carry too little information (useful when landing).
        // Also, for safety, PTAM should currently be stopped when outliers occur.
        if ((ini_one_circle_ && tracker_->GetCurrentPose().inverse().get_translation()[2] < 0.3)
                ||(sqrt((camPosethis.get_translation()-camPoselast.get_translation())*
                        (camPosethis.get_translation()-camPoselast.get_translation()))>0.5))
        {
            isPTAMshouldstop = true;
//            return;
            // keep camPoselast unchanged and just publish it
            camPose4pub = camPoselast;
        }
        else
            camPoselast = camPosethis;
        // publish current pose in map in different cases
        if ( (use_circle_ini_ && !use_ekf_pose) || (use_ekf_pose && isEKFattitudeInicall)){
                publishPose(msg_stamp);
                publishPosewithCovariance(msg_stamp);
                inipose_published = true;

                // yang: publish the landing-pad pose to the set-point node?
                if (tracker_->isLandingpadPoseGet){
                    publish_inilandingpadpose(msg_stamp);
                    if (!tracker_->isFinishPadDetection)
                        publish_finallandingpadpose(msg_stamp);
                }
        }

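        // Report the circle-initialisation status to the circle-detection node.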
        if (use_circle_ini_){
            static geometry_msgs::TransformStamped ptam_state;
            if (tracker_->ini_circle_finished)
                ptam_state.transform.rotation.w = 1; // rotation.w only encodes the initialisation-state flag
            else
                ptam_state.transform.rotation.w = 0;
            ini_circle_pub_.publish(ptam_state);
        }

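        // Publish map, camera and landing-pad visualisation only when someone is subscribed.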
        if (sendvisual){ // && !isPTAMshouldstop
            if (cam_marker_pub_.getNumSubscribers() > 0 || point_marker_pub_.getNumSubscribers() > 0)
                map_viz_->publishMapVisualization(map_,tracker_,cam_marker_pub_,point_marker_pub_, cam_marker_pub_sec, isdualcam);
            if ((landingpad_marker_pub_.getNumSubscribers() > 0) && tracker_->isLandingpadPoseGet)
                map_viz_->publishlandingpad(tracker_, landingpad_marker_pub_);
        }

        if (show_debug_image_) {
//            cv::Mat rgb_cv(480, 640, CV_8UC3, frameRGB_.data());
//            map_viz_->renderDebugImage(rgb_cv,tracker_,map_);
//            cv::imshow("downwards",rgb_cv);
//            cv::waitKey(10);
//            static int savedebugimg = 0;
//            if (savedebugimg == 0){
//                cv::imwrite("inimagedebug.jpg", rgb_cv);
//                savedebugimg = 1;
//            }
            if (cursecimg_good){
                cv::Mat rgb_cv(480, 640, CV_8UC3, frameRGB_sec.data());
                map_viz_->renderDebugImageSec(rgb_cv,tracker_,map_);
                cv::imshow("forwards",rgb_cv);
                cv::waitKey(10);
            }

            if (false) // (istrackPad): disabled reference-image debug view
            {
                CVD::Image<CVD::Rgb<CVD::byte> > frameRef;
                CVD::Image<CVD::byte> frameRefbw;
                frameRefbw.resize(tracker_->GetRefFrame().aLevels[0].im.size());
                frameRef.resize(tracker_->GetRefFrame().aLevels[0].im.size());
                CVD::copy(tracker_->GetRefFrame().aLevels[0].im, frameRefbw);
                //            memcpy(frameRefbw.data(), tracker_->GetRefFrame().aLevels[0].im.data(), tracker_->GetRefFrame().aLevels[0].im.totalsize());
                CVD::convert_image(frameRefbw, frameRef);
                cv::Mat rgb_ref(frameRef.size().y, frameRef.size().x, CV_8UC3, frameRef.data());
                map_viz_->renderRefImage(rgb_ref, tracker_);
                cv::imshow("ref", rgb_ref);
                cv::waitKey(10);
            }

        }
//        isviconcall = false;
    }
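// A minimal sketch (not from the original code) of how the callback above might be
// wired up with image_transport. The class name, topic names and queue sizes are
// illustrative assumptions; the original node's subscription code is not shown here.
#include <ros/ros.h>
#include <image_transport/image_transport.h>

class DualCamNodeSketch  // hypothetical wrapper class
{
public:
  explicit DualCamNodeSketch(ros::NodeHandle& nh) : it_(nh)
  {
    // The Subscriber objects must stay alive, or the subscriptions are dropped.
    sub_master_ = it_.subscribe("cam_down/image_raw", 1,
                                &DualCamNodeSketch::imageCallback, this);
    sub_second_ = it_.subscribe("cam_forward/image_raw", 1,
                                &DualCamNodeSketch::secondImageCallback, this);
  }

private:
  void imageCallback(const sensor_msgs::ImageConstPtr& img_msg) { (void)img_msg; /* as in the example above */ }
  void secondImageCallback(const sensor_msgs::ImageConstPtr& img_msg) { (void)img_msg; /* would store img_secmsg */ }

  image_transport::ImageTransport it_;
  image_transport::Subscriber sub_master_, sub_second_;
};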
	void VrmlGame::detect_corners(std::string &markerdata, std::vector<std::vector<Wml::Vector2d> > &corners)
	{
		bool ret = g_markerkeybase.SetFile(markerdata);

		if (!ret)
		{
			return;
		}

		//////////////////////////////////////////////////////////////////////////
		/// \brief Generated features.
		//////////////////////////////////////////////////////////////////////////
		std::vector<FeatureSpace::ArtificialFeature> _features;

		//////////////////////////////////////////////////////////////////////////
		/// \brief Matches found: <feature index, key index>.
		//////////////////////////////////////////////////////////////////////////
		std::map<int, int> _featurekey;

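		// Detect marker corners in every keyframe image of the map, one corner list per keyframe.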
		corners.resize(mpMap->vpKeyFrames.size());
		int i = 0;
		vector<KeyFrame *>::iterator kf;
		for( kf = mpMap->vpKeyFrames.begin(); kf != mpMap->vpKeyFrames.end(); kf++ )
		{
			corners[i].clear();

			CSimpleImageb simage;

			CVD::Image<CVD::byte> tempimage = (*kf)->aLevels[0].im;
			int height,width;
			height = tempimage.size().y;
			width = tempimage.size().x;
			// flip the image vertically to convert the coordinate system
			unsigned char* tempdata = (unsigned char*)malloc(sizeof(unsigned char)*width*height);
			for (int j=0; j<height; j++)
			{
				memcpy(tempdata+width*(height-1-j),(CVD::byte*)tempimage.data()+width*j,width*sizeof(unsigned char));
			}

			simage.set_data(tempdata, width, height, 1);
			free(tempdata);
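			// Run the simple marker detector on the flipped keyframe image.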
			Pattern::CSimpleMarkerDetector smd;
			std::vector<pattern_parameter<MARKER_FLOAT> > patterns;

			smd.Fit(simage, patterns); // detected patterns, if any, are returned through 'patterns'


			//////////////////////////////////////////////////////////////////////////
			std::vector<Pattern::RectHomography<MARKER_FLOAT> > recthomos;
			recthomos.resize(patterns.size());


			// use the pattern to extract the image features.
			std::vector<pattern_parameter<MARKER_FLOAT> >::iterator it = patterns.begin();

			for (int p = 0; it != patterns.end(); ++ it, ++ p)
			{
				Pattern::RectHomography<MARKER_FLOAT> &obj = recthomos[p];

				obj.corners = it->corners;
				assert(obj.corners.size() == 4);  
			}

			//////////////////////////////////////////////////////////////////////////
			CSimpleImagef grey_image;
			SimpleImage::grayscale(simage, grey_image);

			SimpleImage::flip(grey_image);


			// convert the marker region image to feature description.
			CArtificialFeatureMatch::Pattern2Feature(grey_image, recthomos, _features);

			//////////////////////////////////////////////////////////////////////////
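			// Match the extracted features against the marker key database.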
			CArtificialFeatureMatch::SearchForFeature(_features, g_markerkeybase, _featurekey);


			//////////////////////////////////////////////////////////////////////////
			for (size_t c = 0; c < _features.size(); ++c)
			{
				if (_features[c]._state == FeatureSpace::CALIBRATED)
				{
					assert(_features[c]._corners.size() == 4);

					for (size_t p = 0; p < _features[c]._corners.size(); ++p)
					{
						corners[i].push_back(Wml::Vector2d(_features[c]._corners[p].x2, _features[c]._corners[p].y2));
						std::cout << _features[c]._corners[p].x2 << "," << _features[c]._corners[p].y2 << std::endl;
					}
					std::cout << "marker found in image " << i << "\n";
					// use only the first calibrated feature of this keyframe
					break;
				}
			}
			i++;
		}
	}
// Publish the small preview image
void SystemFrontendBase::PublishSmallImage()
{
  ROS_ASSERT(mpTracker);

  // Don't even bother doing image conversion if nobody's listening
  if (mSmallImagePub.getNumSubscribers() == 0)
    return;

  // We're going to create a small tiled image, based on mmSmallImageOffsets, and copy
  // individual downsampled images into specific locations in the image
  CVD::Image<CVD::byte> imSmall(mirSmallImageSize, 0);

  // Pointcloud to hold measurements. Z axis corresponds to pyramid level
  pcl::PointCloud<pcl::PointXYZ>::Ptr pointMsg(new pcl::PointCloud<pcl::PointXYZ>());

  for (ImageRefMap::iterator offset_it = mmSmallImageOffsets.begin(); offset_it != mmSmallImageOffsets.end();
       ++offset_it)
  {
    std::string camName = offset_it->first;
    CVD::ImageRef irOffset = offset_it->second;

    CVD::Image<CVD::byte> imTracker = mpTracker->GetKeyFrameImage(camName, mnSmallImageLevel);

    // When tracker just added an MKF to the map, it will have gotten rid of the MKF it is holding, so
    // imTracker will be an empty image. CVD::copy doesn't like this too much.
    if (imTracker.totalsize() > 0)
      CVD::copy(imTracker, imSmall, imTracker.size(), CVD::ImageRef(), irOffset);

    for (unsigned l = 0; l < LEVELS; ++l)
    {
      std::vector<TooN::Vector<2>> vMeas = mpTracker->GetKeyFrameSimpleMeas(camName, l);

      for (unsigned i = 0; i < vMeas.size(); ++i)
      {
        TooN::Vector<2> v2MeasInSmallImage = CVD::vec(irOffset) + LevelNPos(vMeas[i], mnSmallImageLevel);
        pcl::PointXYZ pclPoint;

        pclPoint.x = v2MeasInSmallImage[0];
        pclPoint.y = v2MeasInSmallImage[1];
        pclPoint.z = l;

        pointMsg->points.push_back(pclPoint);
      }
    }
  }

  ROS_DEBUG_STREAM("Collected " << pointMsg->points.size() << " simple measurements and put them in a point cloud");

  ros::Time timestamp = mpTracker->GetCurrentTimestamp();

  pointMsg->header.frame_id = "tracker_small_image";
  pointMsg->width = pointMsg->points.size();
  pointMsg->height = 1;
  pointMsg->is_dense = false;

#if ROS_VERSION_MINIMUM(1, 9, 56)  // Hydro or above, uses new PCL library
  pointMsg->header.stamp = timestamp.toNSec();
#else
  pointMsg->header.stamp = timestamp;
#endif

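  // Convert the tiled CVD image to a ROS image message and publish it together
  // with the measurement point cloud, both stamped with the tracker time.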
  sensor_msgs::Image imageMsg;
  imageMsg.header.stamp = timestamp;
  util::ImageToMsg(imSmall, imageMsg);

  mSmallImagePointsPub.publish(pointMsg);
  mSmallImagePub.publish(imageMsg);
}
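// A minimal sketch of the tiling technique used in PublishSmallImage above: CVD::copy
// pastes a source image into a larger target at a given offset. The function name and
// layout here are illustrative assumptions, not part of the original code.
#include <algorithm>
#include <cvd/byte.h>
#include <cvd/image.h>
#include <cvd/image_ref.h>
#include <cvd/utility.h>

CVD::Image<CVD::byte> TileSideBySide(const CVD::Image<CVD::byte>& imLeft,
                                     const CVD::Image<CVD::byte>& imRight)
{
  // The target is wide enough for both images and as tall as the taller one.
  CVD::ImageRef irSize(imLeft.size().x + imRight.size().x,
                       std::max(imLeft.size().y, imRight.size().y));
  CVD::Image<CVD::byte> imTiled(irSize, 0);

  // CVD::copy(source, target, size to copy, source offset, target offset)
  CVD::copy(imLeft, imTiled, imLeft.size(), CVD::ImageRef(), CVD::ImageRef());
  CVD::copy(imRight, imTiled, imRight.size(), CVD::ImageRef(),
            CVD::ImageRef(imLeft.size().x, 0));
  return imTiled;
}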