Example #1
	inline Localization(vector<ObstacleC> *obstacles) :
			location(-1.42, 0), locationKalman(location), kalmanI(locationKalman),
			_cameraProjections(NULL),
			lastOdomX(0), lastOdomY(0),
			globalPos(0, 0, 0), lastOdom(0, 0, 0), lastOdomID(0),
			ballPos(0, 0),
			m_field(field_model::FieldModel::getInstance()),
			obstacles(obstacles),
			CurrentVertexId(0), PreviousVertexId(0), LandmarkCount(0),
			odomLastGet(0, 0),
			atLeastOneObservation(false),
			nodeCounter(0)
	{

		odom_sub = nodeHandle.subscribe("/gait/odometry", 10,
				&Localization::dead_reckoning_callback, this);
		setLoc_sub = nodeHandle.subscribe<geometry_msgs::Point>(
				"/vision/setLocation", 1, &Localization::setloc_callback, this);

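		// Field dimensions; the single-letter names mirror the field model's spec sheet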
		A = m_field->length();
		B = m_field->width();
		E = m_field->goalAreaDepth();
		F = m_field->goalAreaWidth();
		G = m_field->penaltyMarkerDist();
		H = m_field->centerCircleDiameter();
		D = m_field->goalWidth();
		I = m_field->boundary();
		params.ball->radius.set(m_field->ballDiameter() / 2.);
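		// Precompute half-dimensions (the field is symmetric about its centre)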
		A2 = A / 2.;
		B2 = B / 2.;
		D2 = D / 2.;
		E2 = E / 2.;
		F2 = F / 2.;
		G2 = G / 2.;
		H2 = H / 2.;
		I2 = I / 2.;

		// TF transforms
		tfLocField.frame_id_ = "/ego_floor";
		tfLocField.child_frame_id_ = "/loc_field";
		tfLocField.setIdentity();

		lastSavedNodeTime = ros::Time::now();
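		// g2o optimizer setup below; the Levenberg algorithm takes ownership of the
		// block solver, which owns the linear solver, so none are deleted manually here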
		// create the linear solver
		linearSolver = new LinearSolverCSparse<BlockSolverX::PoseMatrixType>();
		// create the block solver on the top of the linear solver
		blockSolver = new BlockSolverX(linearSolver);
		// create the algorithm to carry out the optimization
		optimizationAlgorithm = new OptimizationAlgorithmLevenberg(blockSolver);
	}
Example #2
    // wait for one CameraInfo message, then shut down that subscriber
    void cam_info_callback(const sensor_msgs::CameraInfo &msg)
    {
        camParam = aruco_ros::rosCameraInfo2ArucoCamParams(msg, useRectifiedImages);

        // handle the Cartesian offset between the cameras of a stereo pair;
        // see the sensor_msgs/CameraInfo documentation for details
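        // For the right camera of a calibrated stereo pair, P[3] = -fx * baseline
        // and P[7] = -fy * vertical offset, so -P[3]/P[0] below yields the baseline
        // in metres (e.g. fx = 500 px and a 7.5 cm baseline give P[3] = -37.5)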
        rightToLeft.setIdentity();
        rightToLeft.setOrigin(
            tf::Vector3(
                -msg.P[3]/msg.P[0],
                -msg.P[7]/msg.P[5],
                0.0));

        cam_info_received = true;
        cam_info_sub.shutdown();
    }
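For context, a minimal sketch of how such a one-shot subscriber is typically created in the node's setup code. The handle nh, the class name ArucoNode, and the topic are illustrative assumptions, not taken from the snippet:

    // Hypothetical wiring: a queue size of 1 suffices, since cam_info_callback
    // stores the parameters and shuts the subscriber down after one message.
    cam_info_sub = nh.subscribe("/camera/camera_info", 1,
                                &ArucoNode::cam_info_callback, this);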
Example #3
  TyErrorId processWithLock(CAS &tcas, ResultSpecification const &res_spec)
  {
    MEASURE_TIME;
    outInfo("process begins");
    rs::SceneCas cas(tcas);
    rs::Scene scene = cas.getScene();

    cas.get(VIEW_CLOUD, *cloud);
    cas.get(VIEW_COLOR_IMAGE, color);
    cas.get(VIEW_DEPTH_IMAGE, depth);
    cas.get(VIEW_CAMERA_INFO, cameraInfo);

    indices->clear();
    indices->reserve(cloud->points.size());

    camToWorld.setIdentity();
    if(scene.viewPoint.has())
    {
      rs::conversion::from(scene.viewPoint.get(), camToWorld);
    }
    else
    {
      outWarn("No camera to world transformation, no further processing!");
      throw rs::FrameFilterException();
    }
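    // invert the camera-to-world transform: the frustum test below needs world-to-camera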
    worldToCam = tf::StampedTransform(camToWorld.inverse(), camToWorld.stamp_, camToWorld.child_frame_id_, camToWorld.frame_id_);
    computeFrustum();

    // The default place to look for objects is counter tops, unless the query asks
    // for a different place. The message comes from the designator and does not match
    // the entries of the semantic map, so we need to transform them.
    rs::Query qs = rs::create<rs::Query>(tcas);
    std::vector<std::string> regionsToLookAt;
    regionsToLookAt.assign(defaultRegions.begin(), defaultRegions.end());
    regions.clear();
    if(cas.getFS("QUERY", qs))
    {
      outWarn("loaction set in query: " << qs.location());
      if(qs.location() != "" && std::find(defaultRegions.begin(), defaultRegions.end(), qs.location()) == defaultRegions.end())
      {
        regionsToLookAt.clear();
        regionsToLookAt.push_back(qs.location());
      }
    }

    if(regions.empty())
    {
      std::vector<rs::SemanticMapObject> semanticRegions;
      getSemanticMapEntries(cas, regionsToLookAt, semanticRegions);

      regions.resize(semanticRegions.size());
      for(size_t i = 0; i < semanticRegions.size(); ++i)
      {
        Region &region = regions[i];
        region.width = semanticRegions[i].width();
        region.depth = semanticRegions[i].depth();
        region.height = semanticRegions[i].height();
        region.name = semanticRegions[i].name();
        rs::conversion::from(semanticRegions[i].transform(), region.transform);
      }
    }

    for(size_t i = 0; i < regions.size(); ++i)
    {
      if(!frustumCulling_ || frustumCulling(regions[i]))
      {
        outInfo("region inside frustum: " << regions[i].name);
        filterRegion(regions[i]);
      }
      else
      {
        outInfo("region outside frustum: " << regions[i].name);
      }
    }

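    // Keep only the indices accepted by filterRegion; setKeepOrganized(true)
    // preserves the cloud's 2D layout by overwriting removed points (with NaNs)
    // instead of dropping them.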
    pcl::ExtractIndices<PointT> ei;
    ei.setKeepOrganized(true);
    ei.setIndices(indices);
    ei.filterDirectly(cloud);

    cas.set(VIEW_CLOUD, *cloud);

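    // Change detection: skip this frame if nothing has changed since the last
    // processed one, unless a query arrived or the timeout has elapsed.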
    if(changeDetection && !indices->empty())
    {
      ++frames;
      if(lastImg.empty())
      {
        lastMask = cv::Mat::ones(color.rows, color.cols, CV_8U);
        lastImg = cv::Mat::zeros(color.rows, color.cols, CV_32FC4);
      }

      uint32_t secondsPassed = camToWorld.stamp_.sec - lastTime.sec;
      bool change = checkChange() || cas.has("QUERY") || secondsPassed > timeout;

      if(!change)
      {
        ++filtered;
      }
      else
      {
        lastTime = camToWorld.stamp_;
      }
      outInfo("filtered frames: " << filtered << " / " << frames << "(" << (filtered / (float)frames) * 100 << "%)");

      if(!change)
      {
        outWarn("no changes in frame detected, no further processing!");
        throw rs::FrameFilterException();
      }
    }

    return UIMA_ERR_NONE;
  }