void Label::draw() {
	if(!visible) return;
	drawChildren();

	if(texture) {
		drawTexture(texture, getAbsPos(), z + .0001);
	}
	
	drawText(font, text.c_str(), textColor,
		getAbsPos().x, getAbsPos().y, z);
}
Example #2
void DivNode::renderOutlines(const VertexArrayPtr& pVA, Pixel32 color)
{
    Pixel32 effColor = color;
    if (m_ElementOutlineColor != Pixel32(0,0,0,0)) {
        effColor = m_ElementOutlineColor;
        effColor.setA(128);
    }
    if (effColor != Pixel32(0,0,0,0)) {
        glm::vec2 size = getSize();
        if (size == glm::vec2(DEFAULT_SIZE, DEFAULT_SIZE)) {
            glm::vec2 p0 = getAbsPos(glm::vec2(-4, 0.5));
            glm::vec2 p1 = getAbsPos(glm::vec2(5, 0.5));
            glm::vec2 p2 = getAbsPos(glm::vec2(0.5, -4));
            glm::vec2 p3 = getAbsPos(glm::vec2(0.5, 5));
            pVA->addLineData(effColor, p0, p1, 1);
            pVA->addLineData(effColor, p2, p3, 1);
        } else {
            glm::vec2 p0 = getAbsPos(glm::vec2(0.5, 0.5));
            glm::vec2 p1 = getAbsPos(glm::vec2(size.x+0.5,0.5));
            glm::vec2 p2 = getAbsPos(glm::vec2(size.x+0.5,size.y+0.5));
            glm::vec2 p3 = getAbsPos(glm::vec2(0.5,size.y+0.5));
            pVA->addLineData(effColor, p0, p1, 1);
            pVA->addLineData(effColor, p1, p2, 1);
            pVA->addLineData(effColor, p2, p3, 1);
            pVA->addLineData(effColor, p3, p0, 1);
        }
    }
    for (unsigned i = 0; i < getNumChildren(); i++) {
        getChild(i)->renderOutlines(pVA, effColor);
    }
}
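The recursion above is a common inherited-property pattern: each node resolves its own color override, then hands the effective value down so children without an explicit color inherit it. A minimal self-contained sketch of that pattern (Node and Color are illustrative stand-ins, not the library's actual types):

#include <vector>

struct Color { unsigned rgba = 0; };   // 0 plays the role of Pixel32(0,0,0,0)

struct Node {
    Color override_;                   // node-level override, may be "unset"
    std::vector<Node*> children;

    void renderOutlines(Color parentColor) {
        // resolve: an explicit override wins, otherwise inherit
        Color eff = (override_.rgba != 0) ? override_ : parentColor;
        // ... emit this node's outline geometry with eff ...
        for (Node* c : children)
            c->renderOutlines(eff);    // children inherit the effective color
    }
};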
Example #3
void AreaNode::renderOutlines(const VertexArrayPtr& pVA, Pixel32 parentColor)
{
    Pixel32 effColor = getEffectiveOutlineColor(parentColor);
    if (effColor != Pixel32(0,0,0,0)) {
        glm::vec2 size = getSize();
        glm::vec2 p0 = getAbsPos(glm::vec2(0.5, 0.5));
        glm::vec2 p1 = getAbsPos(glm::vec2(size.x+0.5,0.5));
        glm::vec2 p2 = getAbsPos(glm::vec2(size.x+0.5,size.y+0.5));
        glm::vec2 p3 = getAbsPos(glm::vec2(0.5,size.y+0.5));
        pVA->addLineData(effColor, p0, p1, 1);
        pVA->addLineData(effColor, p1, p2, 1);
        pVA->addLineData(effColor, p2, p3, 1);
        pVA->addLineData(effColor, p3, p0, 1);
    }
}
Example #4
int processData(sensor_data *data) {
    printf("HI, we're in processData\n"); //This is broken, serves as a delay
    static int  start = SS_NUM;
    static int  counter = 0;
    static bool biasSet = false;
    bool        ss_flag = false;

    // initial data collection
    if (start) {
        start--;
        return 0;
    }

    // need to establish bias first
    if(!biasSet) {
        if (checkIfSS()) {
            radians_curr = getAbsPos();
            gyro_bias = getGyroBias();
            acc_bias = getAccBias();
            ss_flag = true;
            biasSet = true;
        } else {
            return 0;
        }
    }

    // periodically check if we are stationary
    if (counter > SENSOR_FREQ/SS_CHECK_FREQ) {
        counter = 0;
        if(checkIfSS()){
            radians_curr = getAbsPos();
            gyro_bias = getGyroBias();
            acc_bias = getAccBias();
            ss_flag = true;
        }
    }

    // update current tilt
    if (!ss_flag) {
        radians_curr.roll  += (data->rotX)/SENSOR_FREQ;
        radians_curr.pitch += (data->rotY)/SENSOR_FREQ;
    }

    counter++;
    return 1;
}
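The interesting part of this example is the fall-through at the end: when no stationary sample is available, the tilt is dead-reckoned by integrating the angular rate over one sample period. A stripped-down sketch of that step (names are illustrative):

// Per-sample integration: angular rate (rad/s) divided by the sample
// frequency (Hz) is the angle traversed during one sample.
struct Tilt { float roll = 0.0f; float pitch = 0.0f; };

void integrateTilt(Tilt& t, float rotX, float rotY, float sensorFreqHz) {
    t.roll  += rotX / sensorFreqHz;
    t.pitch += rotY / sensorFreqHz;
}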
Example #5
void DivNode::renderOutlines(const VertexArrayPtr& pVA, Pixel32 parentColor)
{
    Pixel32 effColor = getEffectiveOutlineColor(parentColor);
    if (effColor != Pixel32(0,0,0,0)) {
        glm::vec2 size = getSize();
        if (size == glm::vec2(0,0)) {
            glm::vec2 p0 = getAbsPos(glm::vec2(-4, 0.5));
            glm::vec2 p1 = getAbsPos(glm::vec2(5, 0.5));
            glm::vec2 p2 = getAbsPos(glm::vec2(0.5, -4));
            glm::vec2 p3 = getAbsPos(glm::vec2(0.5, 5));
            pVA->addLineData(effColor, p0, p1, 1);
            pVA->addLineData(effColor, p2, p3, 1);
        } else {
            AreaNode::renderOutlines(pVA, parentColor);
        }
    }
    for (unsigned i = 0; i < getNumChildren(); i++) {
        getChild(i)->renderOutlines(pVA, effColor);
    }
}
Example #6
		void				uiMover::onMousePressed(const Vec2& pos, uint32_t button)
		{
			if(button == 0)									// if it is a left click
			{
				if(getParent())
				{
					_last_pos = getAbsPos() + pos;
					_last_parent_pos = getParent()->getAbsPos();
				}
				uiMgr::current()->setCapture(this);					// and set exclusive mouse event listening
			}
		}
Example #7
		void				uiMover::onMouseMoved(const Vec2& pos)
		{
			if(uiMgr::current()->getCapture().get_unsafe() == this)					// when mouse move, if the capturing UI is this one then ...
			{
				if(getParent())
				{
					Vec2 new_pos = _last_parent_pos + (pos + getAbsPos()) - _last_pos - (getParent()->getParent() ? getParent()->getParent()->getAbsPos() : 0);
					Vec2 clip_pos = new_pos;
					clip_pos.x = min(max(_movable_start.x, clip_pos.x), _movable_start.x + _movable_extent.x);
					clip_pos.y = min(max(_movable_start.y, clip_pos.y), _movable_start.y + _movable_extent.y);
					getParent()->setPos(clip_pos);							// move the parent
					if(_cb_on_mover_move.valid())
						_cb_on_mover_move(this);
				}
			}
		}
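The clamping in onMouseMoved() is the standard drag pattern: compute the would-be position from the drag delta, then clip it into the allowed rectangle before applying it. A self-contained sketch of the clipping step (Vec2 is a stand-in for the example's vector type):

#include <algorithm>

struct Vec2 { float x = 0, y = 0; };

// Clip a candidate position into the rectangle [start, start + extent],
// the same role _movable_start/_movable_extent play above.
Vec2 clampToRect(Vec2 p, const Vec2& start, const Vec2& extent) {
    p.x = std::min(std::max(start.x, p.x), start.x + extent.x);
    p.y = std::min(std::max(start.y, p.y), start.y + extent.y);
    return p;
}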
Example #8
// a demuxer is also a kind of high-level parser
uint8_t ADM_mpegDemuxer::sync(uint8_t *stream)
{
    uint32_t val, hnt;
    //uint64_t p,tail;

    val = 0;
    hnt = 0;

    // preload the first three bytes into the 24-bit window
    hnt = (read8i() << 16) + (read8i() << 8) + read8i();
    if (_lastErr)
    {
        _lastErr = 0;
        printf("\n io error, aborting sync\n");
        return 0;
    }
    while (hnt != 0x00001)
    {
        hnt <<= 8;
        val = read8i();
        hnt += val;
        hnt &= 0xffffff;

        if (_lastErr)
        {
            _lastErr = 0;
            // check if we are near the end; if so we abort,
            // else we continue
            uint64_t pos;
            pos = getAbsPos();
            // if(_size-pos<1024)
            {
                printf("\n io error, aborting sync\n");
                return 0;
            }
            printf("\n  Bumped into something at %llu / %llu but going on\n", pos, _size);
            if (!_size) assert(0);
            // else we go on...
            hnt = (read8i() << 16) + (read8i() << 8) + read8i();
        }
    }

    *stream = read8i();
    return 1;
}
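What sync() implements is a byte-wise sliding window over the stream: the last three bytes live in a 24-bit accumulator that is shifted and masked until it matches the MPEG start-code prefix 0x000001. The same search over an in-memory buffer, as a self-contained sketch:

#include <cstdint>
#include <cstddef>

// Returns the offset just past the start-code prefix, or -1 if none found.
long findStartCode(const uint8_t* buf, size_t len) {
    uint32_t window = 0xffffff;                    // 24-bit sliding window
    for (size_t i = 0; i < len; i++) {
        window = ((window << 8) | buf[i]) & 0xffffff;
        if (window == 0x000001)
            return static_cast<long>(i + 1);       // buf[i+1] is the stream id
    }
    return -1;
}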
Example #9
bool
RODFNet::isDestination(const RODFDetector& det, ROEdge* edge, std::vector<ROEdge*>& seen,
                       const RODFDetectorCon& detectors) const {
    if (seen.size() == 1000) { // !!!
        WRITE_WARNING("Quitting checking for being a destination for detector '" + det.getID() + "' due to seen edge limit.");
        return false;
    }
    if (edge == getDetectorEdge(det)) {
        // maybe there is another detector at the same edge
        //  get the list of this/these detector(s)
        const std::vector<std::string>& detsOnEdge = myDetectorsOnEdges.find(edge)->second;
        for (std::vector<std::string>::const_iterator i = detsOnEdge.begin(); i != detsOnEdge.end(); ++i) {
            if ((*i) == det.getID()) {
                continue;
            }
            const RODFDetector& sec = detectors.getDetector(*i);
            if (getAbsPos(sec) > getAbsPos(det)) {
                // ok, there is another detector on the same edge and it is
                //  after this one -> no destination
                return false;
            }
        }
    }
    if (!hasApproached(edge)) {
        if (edge != getDetectorEdge(det)) {
            if (hasDetector(edge)) {
                return false;
            }
        }
        return true;
    }
    if (edge != getDetectorEdge(det)) {
        // ok, we are at one of the edges coming behind
        if (myAmInHighwayMode) {
            if (edge->getSpeed() >= 19.4) {
                if (hasDetector(edge)) {
                    // we are still on the highway and there is another detector
                    return false;
                }
            }
        }
    }

    if (myAmInHighwayMode) {
        if (edge->getSpeed() < 19.4 && edge != getDetectorEdge(det)) {
            if (hasDetector(edge)) {
                return true;
            }
            if (myApproachedEdges.find(edge)->second.size() > 1) {
                return true;
            }

        }
    }

    if (myDetectorsOnEdges.find(edge) != myDetectorsOnEdges.end()
            &&
            myDetectorEdges.find(det.getID())->second != edge) {
        return false;
    }
    const std::vector<ROEdge*>& appr  = myApproachedEdges.find(edge)->second;
    bool isall = true;
    size_t no = 0;
    seen.push_back(edge);
    for (size_t i = 0; i < appr.size() && isall; i++) {
        bool had = std::find(seen.begin(), seen.end(), appr[i]) != seen.end();
        if (!had) {
            if (!isDestination(det, appr[i], seen, detectors)) {
                no++;
                isall = false;
            }
        }
    }
    return isall;
}
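Both isDestination() and the isSource() that follows are the same guarded depth-first search: a seen list doubles as cycle protection, a hard cap of 1000 edges bounds the recursion, and an edge qualifies only if all unvisited successors qualify. The skeleton, reduced to its control flow (Edge is an illustrative stand-in, not the SUMO type):

#include <vector>
#include <algorithm>

struct Edge { std::vector<Edge*> next; };

bool reachesSink(Edge* e, std::vector<Edge*>& seen) {
    if (seen.size() == 1000)          // hard limit, mirrors the "// !!!" cap
        return false;
    seen.push_back(e);
    if (e->next.empty())
        return true;                  // no successors: treat as a sink
    for (Edge* n : e->next) {
        if (std::find(seen.begin(), seen.end(), n) != seen.end())
            continue;                 // already visited: skip, don't recurse
        if (!reachesSink(n, seen))
            return false;             // "all successors must qualify" rule
    }
    return true;
}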
Example #10
bool
RODFNet::isSource(const RODFDetector& det, ROEdge* edge,
                  std::vector<ROEdge*>& seen,
                  const RODFDetectorCon& detectors,
                  bool strict) const {
    if (seen.size() == 1000) { // !!!
        WRITE_WARNING("Quitting checking for being a source for detector '" + det.getID() + "' due to seen edge limit.");
        return false;
    }
    if (edge == getDetectorEdge(det)) {
        // maybe there is another detector at the same edge
        //  get the list of this/these detector(s)
        const std::vector<std::string>& detsOnEdge = myDetectorsOnEdges.find(edge)->second;
        for (std::vector<std::string>::const_iterator i = detsOnEdge.begin(); i != detsOnEdge.end(); ++i) {
            if ((*i) == det.getID()) {
                continue;
            }
            const RODFDetector& sec = detectors.getDetector(*i);
            if (getAbsPos(sec) < getAbsPos(det)) {
                // ok, there is another detector on the same edge and it is
                //  before this one -> no source
                return false;
            }
        }
    }
    // it's a source if no edges are approaching the edge
    if (!hasApproaching(edge)) {
        if (edge != getDetectorEdge(det)) {
            if (hasDetector(edge)) {
                return false;
            }
        }
        return true;
    }
    if (edge != getDetectorEdge(det)) {
        // ok, we are at one of the edges in front
        if (myAmInHighwayMode) {
            if (edge->getSpeed() >= 19.4) {
                if (hasDetector(edge)) {
                    // we are still on the highway and there is another detector
                    return false;
                }
                // the next is a hack for the A100 scenario...
                //  We have to look into further edges here as well
                const std::vector<ROEdge*>& appr = myApproachingEdges.find(edge)->second;
                size_t noOk = 0;
                size_t noFalse = 0;
                size_t noSkipped = 0;
                for (size_t i = 0; i < appr.size(); i++) {
                    if (!hasDetector(appr[i])) {
                        noOk++;
                    } else {
                        noFalse++;
                    }
                }
                if ((noFalse + noSkipped) == appr.size()) {
                    return false;
                }
            }
        }
    }

    if (myAmInHighwayMode) {
        if (edge->getSpeed() < 19.4 && edge != getDetectorEdge(det)) {
            // we have left the highway already
            //  -> the detector will be a highway source
            if (!hasDetector(edge)) {
                return true;
            }
        }
    }
    if (myDetectorsOnEdges.find(edge) != myDetectorsOnEdges.end()
            &&
            myDetectorEdges.find(det.getID())->second != edge) {
        return false;
    }

    // let's check the edges in front
    const std::vector<ROEdge*>& appr = myApproachingEdges.find(edge)->second;
    size_t noOk = 0;
    size_t noFalse = 0;
    size_t noSkipped = 0;
    seen.push_back(edge);
    for (size_t i = 0; i < appr.size(); i++) {
        bool had = std::find(seen.begin(), seen.end(), appr[i]) != seen.end();
        if (!had) {
            if (isSource(det, appr[i], seen, detectors, strict)) {
                noOk++;
            } else {
                noFalse++;
            }
        } else {
            noSkipped++;
        }
    }
    if (!strict) {
        return (noFalse + noSkipped) != appr.size();
    } else {
        return (noOk + noSkipped) == appr.size();
    }
}
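The only difference at the tail of isSource() is the voting rule: strict mode demands that every non-skipped predecessor is itself a source, while the default accepts a single positive. Reduced to a sketch:

#include <cstddef>

// strict: all non-skipped predecessors qualified; lenient: at least one did.
bool sourceVote(std::size_t noOk, std::size_t noFalse, std::size_t noSkipped,
                std::size_t total, bool strict) {
    return strict ? (noOk + noSkipped) == total
                  : (noFalse + noSkipped) != total;   // equivalent to noOk > 0
}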
Example #11
cv::Mat motionEstimation_node::imageCallback(sensor_msgs::ImagePtr &left, sensor_msgs::ImagePtr &right)
{
   /* try
    {
        cv::imshow("left", cv_bridge::toCvShare(left, "bgr8")->image);
        cv::imshow("right", cv_bridge::toCvShare(right, "bgr8")->image);
        cv::waitKey(30);
    }*/
      if(image_sub_count == 0)
      {
          features = getStrongFeaturePoints(image_L1, 100, 0.001, 20);

          refindFeaturePoints(left, right, features, points_L1_temp, points_R1_temp);
           if (10 > points_L1_temp.size())
           {
               ROS_INFO("Could not find more than features in stereo 1:");
           }
           else
           {
               camera.left = left;
               camera.right = right;
               cam.push_back(camera);

           }

        }

       image_sub_count++;

      while(image_sub_count > 0)
      {
          skipFrameNumber++;
          if(4 < skipFrameNumber)
          {
             ROS_INFO("NO MOVEMENT FOR LAST 4 FRAMES");
             image_sub_count = 0;
             break;
          }
          refindFeaturePoints(camera.left, left, points_L1_temp, points_L1, points_L2);
          refindFeaturePoints(camera.right, right, points_R1_temp, points_R1, points_R2);
          deleteUnvisiblePoints(points_L1_temp, points_R1_temp, points_L1, points_R1, points_L2, points_R2, camera.left.cols, camera.left.rows);

          if(0 == points_L1.size())
          {
              image_sub_count--;
              ROS_INFO("Could not find more than features in stereo 2:");
              break;
          }

          if (1 == mode)
          {
             if (8 > points_L1.size())
             {
                ROS_INFO("NO MOVEMENT: to less points found");
                image_sub_count--;
                break;
             }

             getInliersFromHorizontalDirection(make_pair(points_L1, points_R1), inliersHorizontal_L1, inliersHorizontal_R1);
             getInliersFromHorizontalDirection(make_pair(points_L2, points_R2), inliersHorizontal_L2, inliersHorizontal_R2);
             deleteZeroLines(points_L1, points_R1, points_L2, points_R2, inliersHorizontal_L1, inliersHorizontal_R1, inliersHorizontal_L2, inliersHorizontal_R2);

             if (8 > inliersHorizontal_L1.size())
             {
                ROS_INFO("NO MOVEMENT: couldn't find horizontal points... probably rectification fails or to less feature points found?!");
                image_sub_count--;
                break;
             }

            foundF_L = getFundamentalMatrix(points_L1, points_L2, &inliersF_L1, &inliersF_L2, F_L);
            foundF_R = getFundamentalMatrix(points_R1, points_R2, &inliersF_R1, &inliersF_R2, F_R);
            deleteZeroLines(inliersF_L1, inliersF_L2, inliersF_R1, inliersF_R2);

            if (1 > inliersF_L1.size())
            {
                ROS_INFO("NO MOVEMENT: couldn't find enough ransac inlier");
                image_sub_count--;
                break;
            }

            drawCorresPoints(camera.left, inliersF_L1, inliersF_L2, "inlier F left " , CV_RGB(0,0,255));
            drawCorresPoints(camera.right, inliersF_R1, inliersF_R2, "inlier F right " , CV_RGB(0,0,255));
            bool poseEstimationFoundES_L = false;
            bool poseEstimationFoundES_R = false;

            if(foundF_L)
            {
              poseEstimationFoundES_L = motionEstimationEssentialMat(inliersF_L1, inliersF_L2, F_L, K_L, T_E_L, R_E_L);
            }

            if(foundF_R)
            {
              poseEstimationFoundES_R = motionEstimationEssentialMat(inliersF_R1, inliersF_R2, F_R, K_R, T_E_R, R_E_R);
            }

            if (!poseEstimationFoundES_L && !poseEstimationFoundES_R)
            {
               // reset both poses before bailing out (in the original listing
               // these assignments sat after the break and were unreachable)
               T_E_L = cv::Mat::zeros(3, 1, CV_32F);
               R_E_L = cv::Mat::eye(3, 3, CV_32F);
               T_E_R = cv::Mat::zeros(3, 1, CV_32F);
               R_E_R = cv::Mat::eye(3, 3, CV_32F);
               image_sub_count--;
               break;
            }

            else if (!poseEstimationFoundES_L)
            {
               T_E_L = cv::Mat::zeros(3, 1, CV_32F);
               R_E_L = cv::Mat::eye(3, 3, CV_32F);
            }

            else if (!poseEstimationFoundES_R)
            {
               T_E_R = cv::Mat::zeros(3, 1, CV_32F);
               R_E_R = cv::Mat::eye(3, 3, CV_32F);
            }

            cv::Mat PK_0 = K_L * P_0;
            cv::Mat PK_LR = K_R * P_LR;

            TriangulatePointsHZ(PK_0, PK_LR, points_L1, points_R1, 0, pointCloud_1);
            TriangulatePointsHZ(PK_0, PK_LR, points_L2, points_R2, 0, pointCloud_2);
#if 1
            composeProjectionMat(T_E_L, R_E_L, P_L);
            composeProjectionMat(T_E_R, R_E_R, P_R);
            cv::Mat PK_L = K_L * P_L;
            cv::Mat PK_R = K_R * P_R;
            getScaleFactor(PK_0, PK_LR, PK_L, PK_R, points_L1, points_R1, points_L2, points_R2, u_L1, u_R1, stereoCloud, nearestPoints);
            ROS_INFO( "skipFrameNumber :  %d",  skipFrameNumber);
            if(u_L1 < -1 || u_L1 > 1000*skipFrameNumber)
            {
               ROS_INFO("scale factors for left cam is too big: %d ", u_L1);
               //skipFrame = true;
               //continue;
            }
            else
            {
                T_E_L = T_E_L * u_L1;
            }

            if(u_R1 < -1 || u_R1 > 1000*skipFrameNumber )
            {
                ROS_INFO( "scale factor for right cam is too big: %f ", u_R1);
                //skipFrame = true;
                //continue;
            }
            else
            {
                T_E_R = T_E_R * u_R1;
            }

#if 0
                // get RGB values for pointcloud representation
                std::vector<cv::Vec3b> RGBValues;
                for (unsigned int i = 0; i < points_L1.size(); ++i){
                    uchar grey = camera.left.at<uchar>(points_L1[i].x, points_L1[i].y);
                    RGBValues.push_back(cv::Vec3b(grey,grey,grey));
                }

                std::vector<cv::Vec3b> red;
                for (unsigned int i = 0; i < 5; ++i){
                    red.push_back(cv::Vec3b(0,0,255));
                }

                AddPointcloudToVisualizer(stereoCloud, "cloud1" + std::to_string(frame1), RGBValues);
                AddPointcloudToVisualizer(nearestPoints, "cloud2" + std::to_string(frame1), red);
#endif
#else
                // 2. method:
                float u_L2, u_R2;
                getScaleFactor2(T_LR, R_LR, T_E_L, R_E_L, T_E_R, u_L2, u_R2);

                if(u_L2 < -1000 || u_R2 < -1000 || u_L2 > 1000 || u_R2 > 1000 ){
                    std::cout << "scale factors to small or to big:  L: " << u_L2 << "  R: " << u_R2  << std::endl;
                } else {
                    T_E_L = T_E_L * u_L2;
                    T_E_R = T_E_R * u_R2;
                }

                //compare both methods
                ROS_INFO("u links  2:%d ", u_L2);
                ROS_INFO("u rechts 2:%d", u_R2);
#endif
                ROS_INFO("translation 1:%d ", T_E_L);
                cv::Mat newTrans3D_E_L;
                getNewTrans3D( T_E_L, R_E_L, newTrans3D_E_L);


                cv::Mat newPos_ES_L;
                getAbsPos(currentPos_ES_L, newTrans3D_E_L, R_E_L.t(), newPos_ES_L);
                cv::Mat rotation_ES_L, translation_ES_L;
                decomposeProjectionMat(newPos_ES_L, translation_ES_L, rotation_ES_L);
                cv::Mat newPos_ES_mean = newPos_ES_L + newPos_ES_R;
                newPos_ES_mean /= 2;

                cv::Mat rotation_ES_mean, translation_ES_mean;
                decomposeProjectionMat(newPos_ES_mean, translation_ES_mean, rotation_ES_mean);
                geometry_msgs::Pose pose;
                pose.position.x = rotation_ES_mean.at<float>(0);
                pose.position.y = rotation_ES_mean.at<float>(1);
                pose.position.z = rotation_ES_mean.at<float>(2);
                posePublisher_.publish(pose);
                Eigen::Matrix3f m;
                cv::cv2eigen(translation_ES_mean, m);   // needs <opencv2/core/eigen.hpp>
                float roll;
                float pitch;
                float yaw;
                // Eigen is 0-indexed; the original 1-based m(3,2)-style indices were out of range
                roll = atan2(m(2,1), m(2,2));
                pitch = atan(-m(2,0)/(m(2,1)*sin(roll)+m(2,2)*cos(roll)));
                yaw = atan2(m(1,0), m(0,0));

                currentPos_ES_mean = newPos_ES_mean;
                currentPos_ES_L = newPos_ES_L;
                currentPos_ES_R = newPos_ES_R;

                ROS_INFO("abs. position  ", translation_ES_mean);


                

          }

          if(2 == mode)
          {
              if (8 > points_L1.size())
              {
                  ROS_INFO("NO MOVEMENT: to less points found");
                  image_sub_count--;
                  break;
              }

              std::vector<cv::Point2f> inliersHorizontal_L1, inliersHorizontal_R1, inliersHorizontal_L2, inliersHorizontal_R2;
              getInliersFromHorizontalDirection(make_pair(points_L1, points_R1), inliersHorizontal_L1, inliersHorizontal_R1);
              getInliersFromHorizontalDirection(make_pair(points_L2, points_R2), inliersHorizontal_L2, inliersHorizontal_R2);
              //delete all points that are not correctly found in stereo setup
              deleteZeroLines(points_L1, points_R1, points_L2, points_R2, inliersHorizontal_L1, inliersHorizontal_R1, inliersHorizontal_L2, inliersHorizontal_R2);

              if (8 > points_L1.size())
              {
                   ROS_INFO("NO MOVEMENT: couldn't find horizontal points... probably rectification fails or to less feature points found?!");
                   image_sub_count--;
                   break;
              }

              cv::Mat F_L;
              bool foundF_L;
              std::vector<cv::Point2f> inliersF_L1, inliersF_L2;
              foundF_L = getFundamentalMatrix(points_L1, points_L2, &inliersF_L1, &inliersF_L2, F_L);

              // compute fundamental matrix F_R1R2 and get inliers from RANSAC
              cv::Mat F_R;
              bool foundF_R;
              std::vector<cv::Point2f> inliersF_R1, inliersF_R2;
              foundF_R = getFundamentalMatrix(points_R1, points_R2, &inliersF_R1, &inliersF_R2, F_R);

              // make sure that there are all inliers in all frames.
              deleteZeroLines(inliersF_L1, inliersF_L2, inliersF_R1, inliersF_R2);

              drawCorresPoints(image_R1, inliersF_R1, inliersF_R2, "inlier F right " , CV_RGB(0,0,255));
              drawCorresPoints(image_L1, inliersF_L1, inliersF_L2, "inlier F left " , CV_RGB(0,0,255));

              // calibrate projection mat
              cv::Mat PK_0 = K_L * P_0;
              cv::Mat PK_LR = K_R * P_LR;

              std::vector<cv::Point3f> pointCloud_1, pointCloud_2;
              TriangulatePointsHZ(PK_0, PK_LR, inliersF_L1, inliersF_R1, 0, pointCloud_1);
              TriangulatePointsHZ(PK_0, PK_LR, inliersF_L2, inliersF_R2, 0, pointCloud_2);

#if 1
                //LEFT:
                bool poseEstimationFoundTemp_L = false;
                cv::Mat T_PnP_L, R_PnP_L;
                if(foundF_L){
                    // GUESS TRANSLATION + ROTATION UP TO SCALE!!!
                    poseEstimationFoundTemp_L = motionEstimationEssentialMat(inliersF_L1, inliersF_L2, F_L, K_L, T_PnP_L, R_PnP_L);
                }

                if (!poseEstimationFoundTemp_L){
                    image_sub_count--;
                    break;
                }

#if 0
                // scale factor:
                float u_L1;
                cv::Mat P_L;
                composeProjectionMat(T_PnP_L, R_PnP_L, P_L);

                // calibrate projection mat
                cv::Mat PK_L = K_L * P_L;

                getScaleFactorLeft(PK_0, PK_LR, PK_L, inliersF_L1, inliersF_R1, inliersF_L2, u_L1);
                if(u_L1 < -1 || u_L1 > 1000 ){
                    ROS_INFO("scale factors to small or to big:  L: ", u_L1);
                    image_sub_count--;
                    break;
                }

                T_PnP_L = T_PnP_L * u_L1;
#endif

                bool poseEstimationFoundPnP_L = motionEstimationPnP(inliersF_L2, pointCloud_1, K_L, T_PnP_L, R_PnP_L);

                if (!poseEstimationFoundPnP_L)
                {
                      image_sub_count--;
                      break;
                }

                if(cv::norm(T_PnP_L) > 1500.0 * skipFrameNumber)
                {
                  // this is bad...
                  ROS_INFO("NO MOVEMENT: estimated camera movement is too big, skip this camera.. T = ", cv::norm(T_PnP_L));
                  image_sub_count--;
                  break;
                }

                cv::Mat newTrans3D_PnP_L;
                getNewTrans3D( T_PnP_L, R_PnP_L, newTrans3D_PnP_L);

                cv::Mat newPos_PnP_L;
                getAbsPos(currentPos_PnP_L, newTrans3D_PnP_L, R_PnP_L, newPos_PnP_L);

                cv::Mat rotation_PnP_L, translation_PnP_L;
                decomposeProjectionMat(newPos_PnP_L, translation_PnP_L, rotation_PnP_L);

                geometry_msgs::Pose pose;
                pose.position.x = rotation_PnP_L.at<float>(0);
                pose.position.y = rotation_PnP_L.at<float>(1);
                pose.position.z = rotation_PnP_L.at<float>(2);
                posePublisher_.publish(pose);
                Eigen::Matrix3f m;
                // was translation_ES_mean: a copy-paste slip, out of scope here
                cv::cv2eigen(translation_PnP_L, m);
                float roll;
                float pitch;
                float yaw;
                roll = atan2(m(2,1), m(2,2));
                pitch = atan(-m(2,0)/(m(2,1)*sin(roll)+m(2,2)*cos(roll)));
                yaw = atan2(m(1,0), m(0,0));
                currentPos_PnP_L  = newPos_PnP_L ;
#else
              //RIGHT:
              bool poseEstimationFoundTemp_R = false;
              cv::Mat T_PnP_R, R_PnP_R;
              if(foundF_R){
                  // GUESS TRANSLATION + ROTATION UP TO SCALE!!!
                  poseEstimationFoundTemp_R = motionEstimationEssentialMat(inliersF_R1, inliersF_R2, F_R, K_R, T_PnP_R, R_PnP_R);
              }

              if (!poseEstimationFoundTemp_R){
                  image_sub_count--;
                  break;
              }

              // use initial guess values for pose estimation
              bool poseEstimationFoundPnP_R = motionEstimationPnP(inliersF_R2, pointCloud_1, K_R, T_PnP_R, R_PnP_R);

              if (!poseEstimationFoundPnP_R){
                  image_sub_count--;
                  break;
              }

              cv::Mat newTrans3D_PnP_R;
              getNewTrans3D( T_PnP_R, R_PnP_R, newTrans3D_PnP_R);

              cv::Mat newPos_PnP_R;
              getAbsPos(currentPos_PnP_R, newTrans3D_PnP_R, R_PnP_R, newPos_PnP_R);

              cv::Mat rotation_PnP_R, translation_PnP_R;
              decomposeProjectionMat(newPos_PnP_R, translation_PnP_R, rotation_PnP_R);
              geometry_msgs::Pose pose;
              pose.position.x = rotation_PnP_R.at<float>(0);
              pose.position.y = rotation_PnP_R.at<float>(1);
              pose.position.z = rotation_PnP_R.at<float>(2);
              posePublisher_.publish(pose);
              Eigen::Matrix3f m;
              cv::cv2eigen(translation_PnP_R, m);
              float roll;
              float pitch;
              float yaw;
              roll = atan2(m(2,1), m(2,2));
              pitch = atan(-m(2,0)/(m(2,1)*sin(roll)+m(2,2)*cos(roll)));
              yaw = atan2(m(1,0), m(0,0));
              currentPos_PnP_R  = newPos_PnP_R ;
#endif
          }

          if (3 == mode)
          {
              std::vector<cv::Point2f> inliersHorizontal_L1, inliersHorizontal_R1, inliersHorizontal_L2, inliersHorizontal_R2;
              getInliersFromHorizontalDirection(make_pair(points_L1, points_R1), inliersHorizontal_L1, inliersHorizontal_R1);
              getInliersFromHorizontalDirection(make_pair(points_L2, points_R2), inliersHorizontal_L2, inliersHorizontal_R2);
              //delete all points that are not correctly found in stereo setup
              deleteZeroLines(points_L1, points_R1, points_L2, points_R2, inliersHorizontal_L1, inliersHorizontal_R1, inliersHorizontal_L2, inliersHorizontal_R2);

              if (8 > points_L1.size())
              {
                  ROS_INFO("NO MOVEMENT: couldn't find horizontal points... rectification may have failed or too few feature points were found");
                  image_sub_count--;
                  break;
              }

              drawCorresPoints(image_L1, points_L1, points_R1, "inlier F1 left-right", cv::Scalar(255,255,0));
              drawCorresPoints(image_L2, points_L2, points_R2, "inlier F2 left-right", cv::Scalar(255,255,0));

              // calibrate projection mat
              cv::Mat PK_0 = K_L * P_0;
              cv::Mat PK_LR = K_R * P_LR;

              // TRIANGULATE POINTS
              std::vector<cv::Point3f> pointCloud_1, pointCloud_2;
              TriangulatePointsHZ(PK_0, PK_LR, points_L1, points_R1, 0, pointCloud_1);
              TriangulatePointsHZ(PK_0, PK_LR, points_L2, points_R2, 0, pointCloud_2);

              float reproj_error_1L = calculateReprojectionErrorHZ(PK_0, points_L1, pointCloud_1);
              float reproj_error_1R = calculateReprojectionErrorHZ(PK_LR, points_R1, pointCloud_1);
              // the check below concerns the second cloud, so compute its errors too
              // (reproj_error_2L/2R were used but never computed in the original listing)
              float reproj_error_2L = calculateReprojectionErrorHZ(PK_0, points_L2, pointCloud_2);
              float reproj_error_2R = calculateReprojectionErrorHZ(PK_LR, points_R2, pointCloud_2);

              if (!positionCheck(P_0, pointCloud_2) && !positionCheck(P_LR, pointCloud_2) && reproj_error_2L < 10.0 && reproj_error_2R < 10.0 )
              {
                  ROS_INFO("second pointcloud seems to be not perfect..");
                  image_sub_count--;
                  break;
              }

#if 0
              //load disparity map
              cv::Mat dispMap1;
              cv::FileStorage fs_dist1(dataPath + "disparity/disparity_"+to_string(frame)+".yml", cv::FileStorage::READ);
              fs_dist1["disparity"] >> dispMap1;
              fs_dist1.release();

              cv::Mat dispMap2;
              cv::FileStorage fs_dist2(dataPath + "disparity/disparity_"+to_string(frame+1)+".yml", cv::FileStorage::READ);
              fs_dist2["disparity"] >> dispMap2;
              fs_dist2.release();

              dispMap1.convertTo(dispMap1, CV_32F);
              dispMap2.convertTo(dispMap2, CV_32F);

              std::vector <cv::Mat_<float>> cloud1;
              std::vector <cv::Mat_<float>> cloud2;
              for(unsigned int i = 0; i < inlier_median_L1.size(); ++i){
                  cv::Mat_<float> point3D1(1,4);
                  cv::Mat_<float> point3D2(1,4);
                  calcCoordinate(point3D1, Q, dispMap1, inlier_median_L1[i].x, inlier_median_L1[i].y);
                  calcCoordinate(point3D2, Q, dispMap2, inlier_median_L2[i].x, inlier_median_L2[i].y);
                  cloud1.push_back(point3D1);
                  cloud2.push_back(point3D2);
              }

              std::vector<cv::Point3f> pcloud1, pcloud2;
              std::vector<cv::Vec3b> rgb1, rgb2;
              for (unsigned int i = 0; i < cloud1.size(); ++i) {
                  if (!cloud1[i].empty() && !cloud2[i].empty()){
                      pcloud1.push_back(cv::Point3f(cloud1[i](0), cloud1[i](1), cloud1[i](2) ));
                      pcloud2.push_back(cv::Point3f(cloud2[i](0), cloud2[i](1), cloud2[i](2) ));
                      rgb1.push_back(cv::Vec3b(255,0,0));
                      rgb2.push_back(cv::Vec3b(0,255,0));
                  }
              }

              AddPointcloudToVisualizer(pcloud1, "pcloud1", rgb1);
              AddPointcloudToVisualizer(pcloud2, "pcloud2", rgb2);

              cv::Mat T_Stereo, R_Stereo;
              bool poseEstimationFoundStereo = motionEstimationStereoCloudMatching(pcloud1, pcloud2, T_Stereo, R_Stereo);

#else

              cv::Mat T_Stereo, R_Stereo;
              bool poseEstimationFoundStereo = motionEstimationStereoCloudMatching(pointCloud_1, pointCloud_2, T_Stereo, R_Stereo);
#endif

              if (!poseEstimationFoundStereo)
              {
                  image_sub_count--;
                  break;
              }
              // ROS_INFO takes a format string; cv::Mat must go through the stream macro
              ROS_INFO_STREAM("rotation:\n" << R_Stereo);
              ROS_INFO_STREAM("translation:\n" << T_Stereo);

              float x_angle, y_angle, z_angle;
              decomposeRotMat(R_Stereo, x_angle, y_angle, z_angle);
              ROS_INFO("x angle: %f", x_angle);
              ROS_INFO("y angle: %f", y_angle);
              ROS_INFO("z angle: %f", z_angle);

              cv::Mat newTrans3D_Stereo;
              getNewTrans3D( T_Stereo, R_Stereo, newTrans3D_Stereo);

              cv::Mat newPos_Stereo;
              getAbsPos(currentPos_Stereo, newTrans3D_Stereo, R_Stereo, newPos_Stereo);

              cv::Mat rotation, translation;
              decomposeProjectionMat(newPos_Stereo, translation, rotation);

              geometry_msgs::Pose pose;
              pose.position.x = rotation.at<float>(0);
              pose.position.y = rotation.at<float>(1);
              pose.position.z = rotation.at<float>(2);
              posePublisher_.publish(pose);
              Eigen::Matrix3f m;
              cv::cv2eigen(translation, m);
              float roll;
              float pitch;
              float yaw;
              roll = atan2(m(2,1), m(2,2));
              pitch = atan(-m(2,0)/(m(2,1)*sin(roll)+m(2,2)*cos(roll)));
              yaw = atan2(m(1,0), m(0,0));

              currentPos_Stereo = newPos_Stereo;
          }

          if (4 == mode)
          {
              // ######################## TRIANGULATION TEST ################################
              // get inliers from stereo constraints
              std::vector<cv::Point2f> inliersHorizontal_L1, inliersHorizontal_R1, inliersHorizontal_L2, inliersHorizontal_R2;
              getInliersFromHorizontalDirection(make_pair(points_L1, points_R1), inliersHorizontal_L1, inliersHorizontal_R1);
              getInliersFromHorizontalDirection(make_pair(points_L2, points_R2), inliersHorizontal_L2, inliersHorizontal_R2);
              //delete all points that are not correctly found in stereo setup
              deleteZeroLines(points_L1, points_R1, points_L2, points_R2, inliersHorizontal_L1, inliersHorizontal_R1, inliersHorizontal_L2, inliersHorizontal_R2);

              drawCorresPoints(image_L1, points_L1, points_R1, "inlier 1 " , CV_RGB(0,0,255));
              drawCorresPoints(image_R1, points_L2, points_R2, "inlier 2 " , CV_RGB(0,0,255));

              if(0 == points_L1.size()){
                  image_sub_count--;
                  break;
              }

              // calibrate projection mat
              cv::Mat PK_0 = K_L * P_0;
              cv::Mat PK_LR = K_R * P_LR;

              std::vector<cv::Point3f> pointCloud_1, pointCloud_2;
              TriangulatePointsHZ(PK_0, PK_LR, points_L1, points_R1, 0, pointCloud_1);
              TriangulatePointsHZ(PK_0, PK_LR, points_L2, points_R2, 0, pointCloud_2);

              if(0 == pointCloud_1.size())
              {
                  ROS_INFO("horizontal inliers: can't find any corresponding points in all 4 frames");
                  image_sub_count--;
                  break;
              }

              // get RGB values for pointcloud representation
              std::vector<cv::Vec3b> RGBValues;
              for (unsigned int i = 0; i < points_L1.size(); ++i)
              {
                  // cv::Mat::at takes (row, col), i.e. (y, x)
                  uchar grey = image_L1.at<uchar>(points_L1[i].y, points_L1[i].x);
                  RGBValues.push_back(cv::Vec3b(grey,grey,grey));
              }

              AddPointcloudToVisualizer(pointCloud_1, "cloud1" + std::to_string(frame1), RGBValues);

#if 1
              //                int index = 0;
              //                for (auto i : pointCloud_1) {
              //                    float length = sqrt( i.x*i.x + i.y*i.y + i.z*i.z);
              //                    cout<< "HZ:  "<< index << ":  " << i << "   length: " << length << endl;
              //                    ++index;
              //                }
              std::vector<cv::Point3f> pcloud_CV;
              TriangulateOpenCV(PK_0, PK_LR, points_L1, points_R1, pcloud_CV);

              //                index = 0;
              //                for (auto i : pcloud_CV) {
              //                    float length = sqrt( i.x*i.x + i.y*i.y + i.z*i.z);
              //                    cout<< "CV:  "<< index << ":  " << i << "   length: " << length << endl;
              //                    ++index;
              //                }
              std::vector<cv::Vec3b> RGBValues2;
              for (unsigned int i = 0; i < points_L1.size(); ++i){
                  //uchar grey2 = image_L2.at<uchar>(points_L2[i].y, points_L2[i].x);
                  //RGBValues2.push_back(cv::Vec3b(grey2,grey2,grey2));
                  RGBValues2.push_back(cv::Vec3b(255,0,0));
              }

              AddPointcloudToVisualizer(pcloud_CV, "cloud2" + std::to_string(frame1), RGBValues2);
#endif
          }
      }

      // assumed: the original listing ended without a return or closing brace;
      // give the declared cv::Mat return type a value so the function is well-formed
      return cv::Mat();
}
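A recurring fragment in this callback converts a rotation matrix to Euler angles. With Eigen's 0-based indexing it reads as below (the original's m(3,2)-style 1-based indices would be out of range for an Eigen::Matrix3f); a minimal sketch under that assumption:

#include <Eigen/Dense>
#include <cmath>

// Roll/pitch/yaw from a 3x3 rotation matrix, 0-indexed as Eigen requires.
void rotToEuler(const Eigen::Matrix3f& m, float& roll, float& pitch, float& yaw) {
    roll  = std::atan2(m(2, 1), m(2, 2));
    pitch = std::atan(-m(2, 0) / (m(2, 1) * std::sin(roll) + m(2, 2) * std::cos(roll)));
    yaw   = std::atan2(m(1, 0), m(0, 0));
}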
Example #12
qreal MapObj::y() 
{
    return getAbsPos().y();
}
Example #13
qreal MapObj::x() 
{
    return getAbsPos().x();
}