Code example #1
void VideoSource::GetAndFillFrameBWandRGB(CVD::Image<CVD::byte> &imBW, CVD::Image<CVD::Rgb<CVD::byte> > &imRGB)
{
    FlyCapture2::Error error;

    imRGB.resize(mirSize);
    imBW.resize(mirSize);
    // static so the capture buffer is reused across calls
    static FlyCapture2::Image imCap;
    error = cam.RetrieveBuffer(&imCap);
    if (error != FlyCapture2::PGRERROR_OK)
    {
        PrintErrorAndExit(error);
    }

    // FlyCapture2 delivers the frame as tightly packed BGR bytes.
    unsigned char *pImage = imCap.GetData();

    for (int y = 0; y < mirSize.y; y++) {
        for (int x = 0; x < mirSize.x; x++) {
            imRGB[y][x].blue = *pImage;
            pImage++;

            imRGB[y][x].green = *pImage;
            imBW[y][x] = *pImage;  // the green channel doubles as the grayscale value
            pImage++;

            imRGB[y][x].red = *pImage;
            pImage++;
        }
    }

}
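The conversion assumes the camera delivers tightly packed BGR data at the mirSize resolution. A minimal caller sketch, assuming PTAM-style surroundings (the VideoSource class above and a header declaring it; the function name here is illustrative):

#include "VideoSource.h"  // assumed header declaring the class above
#include <cvd/image.h>
#include <cvd/byte.h>
#include <cvd/rgb.h>

// Hypothetical caller: grab one frame as grayscale plus RGB.
void GrabOneFrame(VideoSource &videoSource)
{
    CVD::Image<CVD::byte> imBW;
    CVD::Image<CVD::Rgb<CVD::byte> > imRGB;
    // Both images are resized internally to the camera resolution,
    // so they can be passed in empty.
    videoSource.GetAndFillFrameBWandRGB(imBW, imRGB);
}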
Code example #2
File: Intrinsic.cpp  Project: haochihlin/ICE-BA
void Intrinsic::UndistortionMap::Set(const Intrinsic &K) {
  const float sx = float(FTR_UNDIST_LUT_SIZE - 1) / K.w();
  const float sy = float(FTR_UNDIST_LUT_SIZE - 1) / K.h();
  m_fx = sx * K.k().m_fx; m_fy = sy * K.k().m_fy;
  m_cx = sx * K.k().m_cx; m_cy = sy * K.k().m_cy;
  const float fxI = 1.0f / m_fx, fyI = 1.0f / m_fy;
  m_xns.Resize(FTR_UNDIST_LUT_SIZE, FTR_UNDIST_LUT_SIZE);

#ifdef FTR_UNDIST_VERBOSE
  UT::PrintSeparator();
#endif
  Point2D xd, xn;
  float v;
  // Traverse the LUT in an outward spiral from the center cell: x holds the
  // current (column, row), b the spiral bounds, and d the step directions.
  int x[2] = {FTR_UNDIST_LUT_SIZE / 2, FTR_UNDIST_LUT_SIZE / 2};
  int b[2][2] = {{x[0] - 1, x[0] + 1}, {x[1] - 1, x[1] + 1}};
  const int d[2] = {-1, 1};
  const int N = m_xns.Size();
  for (int i = 0, ix = 0, id = 0; i < N; ++i) {
    xd.x() = fxI * (x[0] - m_cx);
    xd.y() = fyI * (x[1] - m_cy);
    // Seed the iterative undistortion with the scale v of the previous
    // (neighboring) sample; the very first sample starts from xd itself.
    if (i == 0)
      xn = xd;
    else
      xn = xd * v;
    if (!K.Undistort(xd, &xn, NULL, NULL, true))
      exit(0);  // give up if the undistortion fails to converge
    // Remember the radial scale between undistorted and distorted point.
    v = sqrtf(xn.SquaredLength() / xd.SquaredLength());
//#ifdef CFG_DEBUG
#if 0
    xn.x() = v;
#endif
    m_xns[x[1]][x[0]] = xn;
//#ifdef CFG_DEBUG
#if 0
    //UT::Print("%d %d\n", x[0], x[1]);
    const float s = 0.5f;
    const int _w = K.w(), _h = K.h();
    const float _fx = K.fx() * s, _fy = K.fy() * s;
    const float _cx = (_w - 1) * 0.5f, _cy = (_h - 1) * 0.5f;
    CVD::Image<CVD::Rgb<ubyte> > I;
    I.resize(CVD::ImageRef(_w, _h));
    I.zero();
    UT::ImageDrawCross(I, int(_fx * xd.x() + _cx), int(_fy * xd.y() + _cy), 5, CVD::Rgb<ubyte>(255, 0, 0));
    UT::ImageDrawCross(I, int(_fx * xn.x() + _cx), int(_fy * xn.y() + _cy), 5, CVD::Rgb<ubyte>(0, 255, 0));
    UT::ImageSave(UT::String("D:/tmp/test%04d.jpg", i), I);
#endif
    // Spiral bookkeeping: when the current axis reaches its bound, extend
    // that bound, switch axes, and flip direction after each half turn.
    if (x[ix] == b[ix][id]) {
      b[ix][id] += d[id];
      ix = 1 - ix;
      if (ix == 0)
        id = 1 - id;
    }
    x[ix] += d[id];
  }
#ifdef FTR_UNDIST_VERBOSE
  UT::PrintSeparator();
#endif
}
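Set() fills an FTR_UNDIST_LUT_SIZE x FTR_UNDIST_LUT_SIZE grid with the undistorted normalized point for each distorted sample, so runtime undistortion reduces to a table lookup. A sketch of how such a table is typically queried, using the (fx, fy, cx, cy) grid mapping computed in Set() and interpolating bilinearly between the four surrounding cells (simplified types; an illustrative reimplementation, not ICE-BA's actual accessor):

struct Pt2 { float x, y; };

// Hypothetical LUT query: lut is a row-major lutSize x lutSize grid of
// precomputed undistorted points; (fx, fy, cx, cy) are the grid-mapping
// parameters computed in Set(); xd is a normalized distorted point.
Pt2 LookupUndistorted(const Pt2 *lut, int lutSize,
                      float fx, float fy, float cx, float cy, Pt2 xd)
{
    // Map the distorted point into continuous grid coordinates.
    float gx = fx * xd.x + cx;
    float gy = fy * xd.y + cy;
    // Clamp so the 2x2 neighborhood stays inside the table.
    if (gx < 0.0f) gx = 0.0f;
    if (gy < 0.0f) gy = 0.0f;
    if (gx > lutSize - 1.001f) gx = lutSize - 1.001f;
    if (gy > lutSize - 1.001f) gy = lutSize - 1.001f;
    const int x0 = (int)gx, y0 = (int)gy;
    const float ax = gx - x0, ay = gy - y0;
    const Pt2 &p00 = lut[y0 * lutSize + x0];
    const Pt2 &p01 = lut[y0 * lutSize + x0 + 1];
    const Pt2 &p10 = lut[(y0 + 1) * lutSize + x0];
    const Pt2 &p11 = lut[(y0 + 1) * lutSize + x0 + 1];
    // Bilinear blend of the four neighboring undistorted points.
    Pt2 xn;
    xn.x = (1 - ay) * ((1 - ax) * p00.x + ax * p01.x) + ay * ((1 - ax) * p10.x + ax * p11.x);
    xn.y = (1 - ay) * ((1 - ax) * p00.y + ax * p01.y) + ay * ((1 - ax) * p10.y + ax * p11.y);
    return xn;
}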
Code example #3
File: DualMonoSLAMnodelet.cpp  Project: bigjun/idSLAM
    // The image callback for the master (downward-looking) camera
    void imageCallback(const sensor_msgs::ImageConstPtr& img_msg) {
        if (isPTAMshouldstop) {
            cout << "PTAM SHOULD STOP ASSUMING OUTLIERS OR LANDING ALREADY!" << endl;
//            return;
        }

        // TODO: synchronize the two camera images; it may be enough to assume
        // their timestamps are close to each other. The main processing should
        // start only once both images have arrived.

        ros::Time msg_stamp = img_msg->header.stamp;
        frame_stamp_ = img_msg->header.stamp.toSec();
//        ROS_INFO("MASTER IMG TIMESTAMP: %f", frame_stamp_);

        if (ini_one_circle_ && !isFinishIniPTAMwithcircle)
        {
            if (!isimage_ini_call || !isviconcall) return;
            else {
                convertImage(image_ini, 1);
                msg_stamp = image_ini->header.stamp;

                CVD::img_save(frameBW_, "inimage.jpg");
            }
        }
        else
            convertImage(img_msg, 1);

        // once PTAM is initialized, the forward-looking image becomes usable
        cursecimg_good = false;
        if (isdualcam && secondimgcame) // && isFinishIniPTAMwithcircle)
        {
            double imginterval = fabs(img_msg->header.stamp.toSec() - img_secmsg->header.stamp.toSec());
            if (imginterval < intervalmax_imgdual)
            {
                secondcamlock.lock();
                convertImage(img_secmsg, 2);
                secondcamlock.unlock();
                cursecimg_good = true; // only when the image is synchronized and PTAM is already initialized
            }
        }

        // pass image frame along to PTAM
        if (use_circle_ini_ && !isFinishIniPTAMwithcircle){
            // use ini pose and image from another node: simple_landing node
            if ((ini_one_circle_ || ini_two_circle_) && !tracker_->circle_pose_get ){
                tracker_->circle_pose_get = true;
                tracker_->se3CfromW = tfs2se3(vicon_state); // just need Twc
                tracker_->time_stamp_circle = vicon_state.header.stamp.toSec();
                tracker_->time_stamp_now = ros::Time::now().toSec();
            }

            // use ground information and IMU/EKF info.
            else if (ini_ground_ && !isEKFattitudeInicall && useEKFiniAtti){
                cout << "Waiting for attitude from EKF node..." << endl;
                return;
            }
            else if (ini_ground_ && !isattitudecall && !useEKFiniAtti){
                cout << "Waiting for attitude from IMU..." << endl;
                return;
            }
            else if (ini_ground_ && useEKFiniAtti && isEKFattitudeInicall ){
                if (!tracker_->attitude_get){

                    tracker_->se3IMU_camfromW = IMU2camWorldfromQuat(iniAttiEKF); // Tcw from Twi, the IMU-computed attitude

                    cout << "PTAM attitude got from EKF: "<< iniAttiEKF.w() <<", "
                                                << iniAttiEKF.x() <<", "
                                                << iniAttiEKF.y() <<", "
                                                << iniAttiEKF.z() <<endl;

                    tracker_->attitude_get = true;
                }
            }
            else if (ini_ground_ && !useEKFiniAtti && isattitudecall){
                if (!tracker_->attitude_get){
                    mAttitudelock.lock();
                    tracker_->se3IMU_camfromW = imu2camWorld(attitude_data_); // Twc from the IMU-computed attitude
                    mAttitudelock.unlock();

                    cout << "PTAM pose got from IMU: "<< attitude_data_.roll <<", "
                            << attitude_data_.pitch<<endl;

                    tracker_->attitude_get = true;
                }
            }
        }

        if (use_ekf_pose && ispose_predicted_ekfcall) {
            mPosePredictedlock.lock();
            tracker_->se3CfromW_predicted_ekf.get_translation()[0] = pose_predicted_ekf_state_.pose.pose.position.x;
            tracker_->se3CfromW_predicted_ekf.get_translation()[1] = pose_predicted_ekf_state_.pose.pose.position.y;
            tracker_->se3CfromW_predicted_ekf.get_translation()[2] = pose_predicted_ekf_state_.pose.pose.position.z;
            tracker_->se3CfromW_predicted_ekf.get_rotation() = orientation_ekf_;
            tracker_->scale_ekf = scale_ekf_; // scale of the pose estimate in the previous image frame
            mPosePredictedlock.unlock();
        }

        ros::Time time1 = ros::Time::now();


        // for debugging
//        static int published_num = 0;
//        if (published_num == 2)
//            return;

        // both images can be fed to the tracker from this callback
        if (!cursecimg_good)
            tracker_->TrackFrame(frameBW_);
        else
            tracker_->TrackFrame(frameBW_, frameBW_sec);

        if (!isFinishIniPTAMwithcircle){
            camPoselast = tracker_->GetCurrentPose();
            camPosethis = camPoselast;
            camPose4pub = camPoselast;
        }
        else{
            camPosethis = tracker_->GetCurrentPose();
            camPose4pub = camPosethis;
        }
        isFinishIniPTAMwithcircle = true;

        ros::Time time2 = ros::Time::now();
        ros::Duration time3 = time2 - time1;
//        ROS_INFO("PTAM time cost: %f", time3.toSec());

        if (use_circle_ini_)
            cout << tracker_->GetMessageForUser() << " " << endl;
        else
            cout << tracker_->GetMessageForUser() << endl;

        // ignore frames carrying too little information, useful when landing;
        // also, for now, stop PTAM for safety when outliers occur.
        // stop when the camera is too low, or when the pose jumps by more
        // than 0.5 m between consecutive frames (likely an outlier)
        if ((ini_one_circle_ && tracker_->GetCurrentPose().inverse().get_translation()[2] < 0.3)
                || (sqrt((camPosethis.get_translation() - camPoselast.get_translation()) *
                         (camPosethis.get_translation() - camPoselast.get_translation())) > 0.5))
        {
            isPTAMshouldstop = true;
//            return;
            // keep camPoselast unchanged and just publish it
            camPose4pub = camPoselast;
        }
        else
            camPoselast = camPosethis;
        // publish the current pose in the map, depending on the configuration
        if ( (use_circle_ini_ && !use_ekf_pose) || (use_ekf_pose && isEKFattitudeInicall)){
                publishPose(msg_stamp);
                publishPosewithCovariance(msg_stamp);
                inipose_published = true;

                // yang, publish landing object pose to set-point node?
                if (tracker_->isLandingpadPoseGet){
                    publish_inilandingpadpose(msg_stamp);
                    if (!tracker_->isFinishPadDetection)
                        publish_finallandingpadpose(msg_stamp);
                }
        }

        if (use_circle_ini_){
            static geometry_msgs::TransformStamped ptam_state;
            if (tracker_->ini_circle_finished)
                ptam_state.transform.rotation.w = 1; // used only as a flag to signal the initialization state
            else
                ptam_state.transform.rotation.w = 0;
            ini_circle_pub_.publish(ptam_state);
        }

        if (sendvisual) { // && !isPTAMshouldstop
            if (cam_marker_pub_.getNumSubscribers() > 0 || point_marker_pub_.getNumSubscribers() > 0)
                map_viz_->publishMapVisualization(map_, tracker_, cam_marker_pub_, point_marker_pub_, cam_marker_pub_sec, isdualcam);
            if ((landingpad_marker_pub_.getNumSubscribers() > 0) && tracker_->isLandingpadPoseGet)
                map_viz_->publishlandingpad(tracker_, landingpad_marker_pub_);
        }

        if (show_debug_image_) {
//            cv::Mat rgb_cv(480, 640, CV_8UC3, frameRGB_.data());
//            map_viz_->renderDebugImage(rgb_cv,tracker_,map_);
//            cv::imshow("downwards",rgb_cv);
//            cv::waitKey(10);
//            static int savedebugimg = 0;
//            if (savedebugimg == 0){
//                cv::imwrite("inimagedebug.jpg", rgb_cv);
//                savedebugimg = 1;
//            }
            if (cursecimg_good){
                cv::Mat rgb_cv(480, 640, CV_8UC3, frameRGB_sec.data());
                map_viz_->renderDebugImageSec(rgb_cv, tracker_, map_);
                cv::imshow("forwards", rgb_cv);
                cv::waitKey(10);
            }

            if (false) // (istrackPad)
            {
                CVD::Image<CVD::Rgb<CVD::byte> > frameRef;
                CVD::Image<CVD::byte> frameRefbw;
                frameRefbw.resize(tracker_->GetRefFrame().aLevels[0].im.size());
                frameRef.resize(tracker_->GetRefFrame().aLevels[0].im.size());
                CVD::copy(tracker_->GetRefFrame().aLevels[0].im, frameRefbw);
                //            memcpy(frameRefbw.data(), tracker_->GetRefFrame().aLevels[0].im.data(), tracker_->GetRefFrame().aLevels[0].im.totalsize());
                CVD::convert_image(frameRefbw, frameRef);
                cv::Mat rgb_ref(frameRef.size().y, frameRef.size().x, CV_8UC3, frameRef.data());
                map_viz_->renderRefImage(rgb_ref, tracker_);
                cv::imshow("ref", rgb_ref);
                cv::waitKey(10);
            }

        }
//        isviconcall = false;
    }
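For context, a callback like this is registered on an image topic when the nodelet initializes. A rough wiring sketch (class name, topic, and queue size are illustrative, not taken from idSLAM; the plugin export macro is omitted):

#include <nodelet/nodelet.h>
#include <ros/ros.h>
#include <sensor_msgs/Image.h>

class DualMonoSLAMNodelet : public nodelet::Nodelet {
    ros::Subscriber image_sub_;

    void imageCallback(const sensor_msgs::ImageConstPtr &img_msg) { /* as above */ }

    virtual void onInit() {
        ros::NodeHandle nh = getNodeHandle();
        // Queue size 1: prefer dropping stale frames over tracking old ones.
        image_sub_ = nh.subscribe("image", 1, &DualMonoSLAMNodelet::imageCallback, this);
    }
};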