// spin until all counters reach at least max
void MyInfo::waitThreadFunc(
            robot_interaction::LockedRobotState* locked_state,
            int** counters,
            int max)
{
  bool go = true;
  while(go)
  {
    go = false;
    cnt_lock_.lock();
    for (int i = 0 ; counters[i] ; ++i)
    {
      if (counters[i][0] < max)
        go = true;
    }
    cnt_lock_.unlock();

    checkState(*locked_state);
    checkState(*locked_state);
  }
  cnt_lock_.lock();
  quit_ = true;
  cnt_lock_.unlock();
}
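The loop above polls the counters under cnt_lock_ until they all reach max. A condition variable expresses the same wait without busy-spinning; a minimal sketch under assumed names (cnt_mutex, cnt_cv, and waitCounters are illustrative, not from the original test):

// Sketch: the same wait without busy-spinning (names are assumptions).
#include <mutex>
#include <condition_variable>

std::mutex cnt_mutex;
std::condition_variable cnt_cv;  // workers notify after incrementing a counter

void waitCounters(int** counters, int max)
{
  std::unique_lock<std::mutex> lk(cnt_mutex);
  cnt_cv.wait(lk, [&] {
    for (int i = 0; counters[i]; ++i)
      if (counters[i][0] < max)
        return false;  // at least one counter still below max
    return true;       // all counters reached max
  });
}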
Example #2
void TimerHandler(
	const boost::system::error_code & error,
	boost::shared_ptr< boost::asio::deadline_timer > timer,
	boost::shared_ptr< boost::asio::io_service::strand > strand
)
{
	if( error )
	{
		global_stream_lock.lock();
		std::cout << "[" << boost::this_thread::get_id()
			<< "] Error: " << error << std::endl;
		global_stream_lock.unlock();
	}
	else
	{
		std::cout << "[" << boost::this_thread::get_id()
			<< "] TimerHandler " << std::endl;

		timer->expires_from_now( boost::posix_time::seconds( 1 ) );
		timer->async_wait(
			strand->wrap( boost::bind( &TimerHandler, _1, timer, strand ) )
		);
	}
}
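TimerHandler re-arms itself through the strand, so successive runs are serialized. A minimal sketch of how such a chain is typically started (this setup code is an assumption, not part of the excerpt):

// Sketch: bootstrapping the self-re-arming timer chain above (assumed setup).
int main()
{
	boost::shared_ptr<boost::asio::io_service> io_service(
		new boost::asio::io_service);
	boost::shared_ptr<boost::asio::io_service::strand> strand(
		new boost::asio::io_service::strand(*io_service));
	boost::shared_ptr<boost::asio::deadline_timer> timer(
		new boost::asio::deadline_timer(*io_service));

	timer->expires_from_now(boost::posix_time::seconds(1));
	timer->async_wait(strand->wrap(boost::bind(&TimerHandler, _1, timer, strand)));
	io_service->run();  // blocks, invoking TimerHandler roughly once per second
	return 0;
}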
Example #3
void cmd_vel_received(const geometry_msgs::Twist::ConstPtr& cmd_vel)
{
    //roomba->drive(cmd_vel->linear.x, cmd_vel->angular.z);
    double linear_speed = cmd_vel->linear.x; // m/s
    double angular_speed = cmd_vel->angular.z; // rad/s
    ROS_INFO("Velocity received: %.2f %.2f", linear_speed, angular_speed);

    //int left_speed_mm_s = (int)((linear_speed-ROBOT_AXLE_LENGTH*angular_speed/2)*1e3);		// Left wheel velocity in mm/s
    //int right_speed_mm_s = (int)((linear_speed+ROBOT_AXLE_LENGTH*angular_speed/2)*1e3);	// Right wheel velocity in mm/s

    orcp2::ORCP2 orcp(drive_serial);

    drive_serial_write.lock();

    if( linear_speed <  std::numeric_limits<double>::epsilon() &&
            linear_speed > -std::numeric_limits<double>::epsilon() ) {
        // zero linear speed - turn in place
        uint16_t speed = speed_koef * angular_speed * wheel_track  * gear_reduction / 2.0;
        orcp.drive_4wd(speed, -speed, speed, -speed);
    }
    else if( angular_speed <  std::numeric_limits<double>::epsilon() &&
             angular_speed > -std::numeric_limits<double>::epsilon() ) {
        // zero angular speed - pure forward/backward motion
        orcp.motorsWrite(speed_koef * linear_speed * wheel_track  * gear_reduction / 2.0);
    }
    else {
        // Rotation about a point in space
        //$TODO
        uint16_t left = speed_koef * linear_speed - angular_speed * wheel_track  * gear_reduction / 2.0;
        uint16_t right = speed_koef * linear_speed + angular_speed * wheel_track  * gear_reduction / 2.0;

        orcp.drive_4wd(left, right, left, right);
    }

    drive_serial_write.unlock();
}
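The commented-out formulas above carry the differential-drive math; note that both wheel velocities can be negative, so the uint16_t locals in this handler would wrap for reverse or clockwise commands. A signed sketch of that math, using ROBOT_AXLE_LENGTH from the comments:

// Sketch: signed differential-drive wheel speeds (mm/s), per the
// commented-out formulas above; negative values mean reverse.
int left_speed_mm_s  = (int)((linear_speed - ROBOT_AXLE_LENGTH * angular_speed / 2) * 1e3);
int right_speed_mm_s = (int)((linear_speed + ROBOT_AXLE_LENGTH * angular_speed / 2) * 1e3);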
Example #4
void ServiceImpl::startSimControlTask()
{
	assert(serviceThread == NULL);

	//get module handles
	initModuleHandles();
	
	//get cmd arguments
	s_vpi_vlog_info vlog_info;
	vpi_get_vlog_info(&vlog_info);
	parseOptions(vlog_info.argc, vlog_info.argv);

	simMutex.lock();

	//start server thread
	serviceThread = new boost::thread(boost::bind(&ServiceImpl::serverThreadProc, this));
}
Example #5
void setFloor(pcl::visualization::PCLVisualizer *viewer, string svm_filename, float voxel_size, Eigen::Matrix3f rgb_intrinsics_matrix, float min_height, float max_height)
{
	callback_args cb_args;
	PointCloudT::Ptr clicked_points_3d(new PointCloudT);
	cb_args.clicked_points_3d = clicked_points_3d;
	cb_args.viewerPtr = pcl::visualization::PCLVisualizer::Ptr(viewer);
	viewer->registerPointPickingCallback(pp_callback, (void*)&cb_args);

	cout << "Kliknij w 3 punkty na podlodze trzymajac wcisniety SHIFT";
	viewer->addText("Kliknij w 3 punkty na podlodze trzymajac wcisniety SHIFT", 250, 300, 20, 1, 1, 1);
	viewer->addText("Nastepnie nacisnij klawisz Q", 250, 250, 20, 1, 1, 1);

	// Spin until 'Q' is pressed:
	viewer->spin();
	std::cout << "Gotowe." << std::endl;
	

	cloud_mutex.unlock();

	// Ground plane estimation:
	ground_coeffs.resize(4);
	std::vector<int> clicked_points_indices;

	for (unsigned int i = 0; i < clicked_points_3d->points.size(); i++)
		clicked_points_indices.push_back(i);

	pcl::SampleConsensusModelPlane<PointT> model_plane(clicked_points_3d);
	model_plane.computeModelCoefficients(clicked_points_indices, ground_coeffs);
	std::cout << "Ground plane: " << ground_coeffs(0) << " " << ground_coeffs(1) << " " << ground_coeffs(2) << " " << ground_coeffs(3) << std::endl;

	// Create classifier for people detection:
	pcl::people::PersonClassifier<pcl::RGB> person_classifier;
	person_classifier.loadSVMFromFile(svm_filename);   // load trained SVM

	// People detection app initialization:
	// people detection object
	people_detector.setVoxelSize(voxel_size);                        // set the voxel size
	people_detector.setIntrinsics(rgb_intrinsics_matrix);            // set RGB camera intrinsic parameters
	people_detector.setClassifier(person_classifier);                // set person classifier
	//people_detector.setHeightLimits(min_height, max_height);       // set person height limits
	people_detector.setPersonClusterLimits(min_height, max_height, 0.1, 8.0); // set person height limits
	floor_tagged = true;
}
Example #6
void LaserScan_callback(const sensor_msgs::LaserScan::ConstPtr& msg) {
  /* Using input from the front laser scanner check for two things:
  1. Whether there are not obstacles in front of the robot (close range)
  2. To which direction should the robot face to drive without hitting an obstacle (mid-range)
  */
  
  

  /*
  Number of range measurements = 512
  Angle of each sensor = about 0.35 degrees (0.00613592332229 radians)
  Field of view = about 180 degrees (3.141592741 radians)
  */

  // look for a clear corridor of 30 degrees and 1.5 meters ahead
  int numPixels = 512;
  float pixelAngle = 0.35; // in degrees


  if(numPixels!=msg->ranges.size())
    cout << "Warning: number of Hokuyo laser sensors different than usual (512)" << endl;


  // check that we don't run into any thing
  float safeDistance = 0.40; // 40cm
  float safetyAngle = 120; // 120 degrees
  int numClearPixelsPerSide = (safetyAngle/2)/pixelAngle;
  if(!canAdvance(msg->ranges,numClearPixelsPerSide,safeDistance)) {
    cout << "Stop, cannot advance" << endl;
    dir_value_mutex.lock();
    can_move = false;
    dir_value_mutex.unlock();
  } else {
    dir_value_mutex.lock();
    can_move = true;
    dir_value_mutex.unlock();
  }

  // find corridor direction
  int numCorridorPixels = 120; // 120*0.00613592332229*180/pi=42 degrees
  float clearDistance = 1.5; // 1.5 meters ahead
  float degreesToTurn = findClearCorridor(msg->ranges,numCorridorPixels,clearDistance,pixelAngle);

  // negative means to turn right
  dir_value_mutex.lock();
  direction = degreesToTurn;
  dir_value_mutex.unlock();
  cout << "Degrees to turn = " << degreesToTurn << endl;
}
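The geometry in the comments (512 beams, about 0.35° per beam, spanning roughly 180°) implies a direct mapping between beam index and bearing; a small sketch under those assumptions (beam 256 taken as straight ahead):

// Sketch: index <-> bearing for the scan geometry described above
// (beam 256 is straight ahead; negative degrees mean right of center).
float beamIndexToDegrees(int i) {
  const float pixelAngle = 0.35f;  // degrees per beam
  return (i - 256) * pixelAngle;
}
int degreesToBeamIndex(float deg) {
  const float pixelAngle = 0.35f;
  return 256 + (int)(deg / pixelAngle);
}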
Example #7
void WorkerThread( boost::shared_ptr< boost::asio::io_service > io_service )
{
	global_stream_lock.lock();
	std::cout << "[" << boost::this_thread::get_id()
		<< "] Thread Start" << std::endl;
	global_stream_lock.unlock();

	while( true )
	{
		try
		{
			boost::system::error_code ec;
			io_service->run( ec );
			if( ec )
			{
				global_stream_lock.lock();
				std::cout << "[" << boost::this_thread::get_id()
					<< "] Error: " << ec << std::endl;
				global_stream_lock.unlock();
			}
			break;
		}
		catch( std::exception & ex )
		{
			global_stream_lock.lock();
			std::cout << "[" << boost::this_thread::get_id()
				<< "] Exception: " << ex.what() << std::endl;
			global_stream_lock.unlock();
		}
	}

	global_stream_lock.lock();
	std::cout << "[" << boost::this_thread::get_id()
		<< "] Thread Finish" << std::endl;
	global_stream_lock.unlock();
}
Example #8
void WorkerThread(boost::shared_ptr<boost::asio::io_service> iosvc, int counter)
{
	global_stream_lock.lock();
	std::cout << "Thread " << counter << " Start.\n";
	global_stream_lock.unlock();

	try
	{
		iosvc->run();

		global_stream_lock.lock();
		std::cout << "Thread " << counter << " End.\n";
		global_stream_lock.unlock();
	}
	catch(std::exception & ex)
	{
		global_stream_lock.lock();
		std::cout << "Message: " << ex.what() << ".\n";
		global_stream_lock.unlock();
	}
}
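Both WorkerThread variants expect io_service::run() to be driven from a thread pool; a minimal sketch of the usual setup for the two-argument variant above (this setup is an assumption, not part of the excerpt; the work object keeps run() from returning while handlers may still arrive):

// Sketch: typical pool setup driving WorkerThread(iosvc, counter) above.
int main()
{
	boost::shared_ptr<boost::asio::io_service> io_service(
		new boost::asio::io_service);
	boost::shared_ptr<boost::asio::io_service::work> work(
		new boost::asio::io_service::work(*io_service));

	boost::thread_group pool;
	for (int i = 0; i < 4; ++i)
		pool.create_thread(boost::bind(&WorkerThread, io_service, i));

	// ... post handlers, then shut down:
	work.reset();    // let run() return once queued handlers finish
	pool.join_all();
	return 0;
}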
// Check the state.  It should always be valid.
void MyInfo::checkState(robot_interaction::LockedRobotState &locked_state)
{
  robot_state::RobotStateConstPtr s = locked_state.getState();
  
  robot_state::RobotState cp1(*s);

  // take some time
  cnt_lock_.lock();
  cnt_lock_.unlock();
  cnt_lock_.lock();
  cnt_lock_.unlock();
  cnt_lock_.lock();
  cnt_lock_.unlock();

  // check mim_f == joint_f
  EXPECT_EQ(s->getVariablePositions()[MIM_F],
            s->getVariablePositions()[JOINT_F] * 1.5 + 0.1);

  robot_state::RobotState cp2(*s);

  EXPECT_NE(cp1.getVariablePositions(), cp2.getVariablePositions());
  EXPECT_NE(cp1.getVariablePositions(), s->getVariablePositions());

  int cnt = cp1.getVariableCount();
  for (int i = 0 ; i < cnt ; ++i)
  {
    EXPECT_EQ(cp1.getVariablePositions()[i], 
              cp2.getVariablePositions()[i]);
    EXPECT_EQ(cp1.getVariablePositions()[i], 
              s->getVariablePositions()[i]);
  }

  // check mim_f == joint_f
  EXPECT_EQ(s->getVariablePositions()[MIM_F],
            s->getVariablePositions()[JOINT_F] * 1.5 + 0.1);
}
Example #10
	modbus_t *getContext() {
		update_mutex.lock();
		return ctx;
	}
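getContext() returns with update_mutex still held, so callers must pair it with a release call; a hedged sketch of the presumed counterpart (releaseContext is an assumed name, not shown in the excerpt):

	// Sketch: the matching release for getContext() above (assumed name).
	void releaseContext() {
		update_mutex.unlock();
	}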
Example #11
void AutoTune::start()
{
    
//         vector<double> inputRates={ 0.630478,0.996342,0.729125,0.523573,0.276309,0.053071,0.00143688,0.431644};
//         double res;
//         EvaluateResult(inputRates, res);
    
#if 1
    this->workerSequence.resize(this->workerNum,NULL);
    vector<double> currResult(this->workerNum,-1.0);
    
    //construct parameter array.
    vector<Point3d> paramArray;
    
    for (double pCrit = pCritMin; pCrit <= pCritMax; pCrit+=critStep) {
        for (double nCrit = nCritMin; nCrit <= nCritMax; nCrit+=critStep) {
            for (double epsilon = epsilonMin; epsilon <= epsilonMax; epsilon+=epsilonStep) {
                Point3d newParam;
                newParam.x = pCrit;
                newParam.y = nCrit;
                newParam.z = epsilon;
                paramArray.push_back(newParam);
            }
        }
    }
    
    LOG(INFO)<<"paramArray size = "<<paramArray.size();
    
    while (!paramArray.empty()) {
        
        for (int i=0; i<workerNum; i++) {
            
            if(NULL == workerSequence[i])
            {
                if(paramArray.empty()) break;
                Point3d param = paramArray.back();
                paramArray.pop_back();
                this->workingParams[i] = param;
#if( defined(WIN32) && _MSC_VER<=1700) // before VS2012, variadic templates are not fully supported.
                this->workerSequence[i] = new boost::thread(&AutoTune::WorkWithParameters,this,i,param.x,param.y,param.z);
#else
				this->workerSequence[i] = new std::thread(&AutoTune::WorkWithParameters,this,i,param.x,param.y,param.z);
#endif
            }else{
                workerSequence[i]->join();
                delete workerSequence[i];
                
                //write result
                myLock.lock();
                if(accuracies[i]>0.8)
                {
                    fstream fs("result.txt",ios::out|ios::app);
                    fs<<"Result = "<<this->accuracies[i]<<" parameters = "<<workingParams[i]<<" weights = ";
                    for (int j=0; j<dim; j++) {
                        fs<<this->weightResults[i][j]<<" ";
                    }
                    fs<<endl;
                    fs.close();
                }
                myLock.unlock();
                
                //create new worker.
                if(paramArray.empty()) break;
                
                Point3d param = paramArray.back();
                paramArray.pop_back();
                this->workingParams[i] = param;
#if( defined(WIN32) && _MSC_VER<=1700) // before VS2012, variadic templates are not fully supported.
				this->workerSequence[i] = new boost::thread(&AutoTune::WorkWithParameters,this,i,param.x,param.y,param.z);
#else
                this->workerSequence[i] = new std::thread(&AutoTune::WorkWithParameters,this,i,param.x,param.y,param.z);
#endif                
            }
            
        }
        
        
    }
    
#endif
    
}
void callbackClusteredClouds(const clustered_clouds_msgs::ClusteredCloudsConstPtr& msg)
{
#if PUBLISH_TRANSFORM
  static tf::TransformBroadcaster tf_br;
#endif

  g_marker_array.markers.clear();
  g_marker_id = 0;

  pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZRGB>);
  std::vector<tf::Vector3> hand_positions, arm_directions;
  mutex_config.lock();
  for(size_t i = 0; i < msg->clouds.size(); i++)
  {
    bool cloud_with_rgb_data = false;
    std::string field_list = pcl::getFieldsList (msg->clouds[i]);    
    if(field_list.rfind("rgb") != std::string::npos)
    {
      cloud_with_rgb_data = true;
    }



    if(cloud_with_rgb_data)
    {
      pcl::PointCloud<pcl::PointXYZRGB>::Ptr hand_cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
      //pcl::PointCloud<pcl::PointXYZRGB>::Ptr finger_cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
      tf::Vector3 hand_position, arm_position, arm_direction;
      bool found = checkCloud<pcl::PointXYZRGB>(msg->clouds[i],
                                                hand_cloud,
                                                //finger_cloud,
                                                msg->header.frame_id,
                                                hand_position,
                                                arm_position,
                                                arm_direction);
      if(found)
      {
        hand_positions.push_back(hand_position);
        arm_directions.push_back(arm_direction);
        *cloud_out += *hand_cloud;
      }
    }
    //else
    //{
      //checkCloud<pcl::PointXYZ>(msg->clouds[i], msg->header.frame_id);
    //}
  }
  mutex_config.unlock();



  if(hand_positions.size() > g_hand_trackers.size())
  {
    std::vector<tf::Vector3> hand_positions_tmp = hand_positions;
    cv::Mat result;
    for (size_t i = 0; i < g_hand_trackers.size(); i++)
    {
      result = g_hand_trackers[i].first.lastResult();
      tf::Vector3 point1(result.at<float>(0), result.at<float>(1), result.at<float>(2));
      int index = closestPoint(point1, hand_positions_tmp);
      //remove
      hand_positions_tmp.erase(hand_positions_tmp.begin() + index);
    }

    //add new tracker
    for(size_t i = 0; i < hand_positions_tmp.size(); i++)
    {
      KalmanFilter3d tracker;
      cv::Point3f point(hand_positions_tmp[i].x(),
                        hand_positions_tmp[i].y(),
                        hand_positions_tmp[i].z());
      tracker.initialize(1.0 / 30.0, point, 1e-2, 1e-1, 1e-1);
      TrackerWithHistory t;
      t.first = tracker;
      g_hand_trackers.push_back(t);
    }
  }

  cv::Mat result;
  std::vector<tf::Vector3> results;
  interaction_msgs::ArmsPtr arms_msg(new interaction_msgs::Arms);
  arms_msg->arms.resize(g_hand_trackers.size());
  for (size_t i = 0; i < g_hand_trackers.size(); i++)
  {
    g_hand_trackers[i].first.predict(result);
    results.push_back(tf::Vector3(result.at<float>(0), result.at<float>(1), result.at<float>(2)));
    arms_msg->arms[i].hand.rotation.w = 1;
  }

  int index = -1;  // stays -1 if the update loop below never runs
  cv::Point3f measurement;
  tf::Quaternion q_hand;
  Eigen::Quaternionf q;
  tf::Quaternion q_rotate;
  q_rotate.setEuler(0, 0, M_PI);
  int last_index = -1;
  for(size_t i = 0; i < hand_positions.size(); i++)
  {
    index = closestPoint(hand_positions[i], results);
    if(last_index == index)
      continue;
    last_index = index;
    measurement.x = hand_positions[i].x();
    measurement.y = hand_positions[i].y();
    measurement.z = hand_positions[i].z();
    g_hand_trackers[index].first.update(measurement, result);
    results[index] = tf::Vector3(result.at<float>(0), result.at<float>(1), result.at<float>(2));

    q.setFromTwoVectors(Eigen::Vector3f(1, 0, 0),
                        Eigen::Vector3f(arm_directions[i].x(), arm_directions[i].y(), arm_directions[i].z()));
    tf::Quaternion q_tf(q.x(), q.y(), q.z(), q.w());
    q_hand = q_tf * q_rotate;
    arms_msg->arms[index].hand.rotation.x = q_hand.x();
    arms_msg->arms[index].hand.rotation.y = q_hand.y();
    arms_msg->arms[index].hand.rotation.z = q_hand.z();
    arms_msg->arms[index].hand.rotation.w = q_hand.w();
  }

  for(size_t i = 0; i < results.size(); i++)
  {
    g_hand_trackers[i].first.updateState();
    arms_msg->arms[i].arm_id = g_hand_trackers[i].first.id();
    arms_msg->arms[i].hand.translation.x = results[i].x();
    arms_msg->arms[i].hand.translation.y = results[i].y();
    arms_msg->arms[i].hand.translation.z = results[i].z();

    //prepare markers if needed
    if(g_marker_array_pub.getNumSubscribers() != 0)
    {
      geometry_msgs::Point marker_point;
      marker_point.x = results[i].x();
      marker_point.y = results[i].y();
      marker_point.z = results[i].z();
      g_hand_trackers[i].second.push_back(marker_point);
      if(g_hand_trackers[i].second.size() > HAND_HISTORY_SIZE)
        g_hand_trackers[i].second.pop_front();
    }

#if PUBLISH_TRANSFORM
    tf::Transform hand_tf;
    hand_tf.setOrigin(results[i]);

    if(index != -1)
    {
      hand_tf.setRotation(tf::Quaternion(arms_msg->arms[i].hand.rotation.x,
                                         arms_msg->arms[i].hand.rotation.y,
                                         arms_msg->arms[i].hand.rotation.z,
                                         arms_msg->arms[i].hand.rotation.w));
    }
    else
    {
      hand_tf.setRotation(tf::Quaternion(0, 0, 0, 1));
    }

    std::stringstream ss;
    ss << "hand" << g_hand_trackers[i].first.id();

    tf_br.sendTransform(tf::StampedTransform(hand_tf, ros::Time::now(), msg->header.frame_id, ss.str()));
#endif

  }

  //remove dead tracker
  std::vector<TrackerWithHistory>::iterator it = g_hand_trackers.begin();
  while(it != g_hand_trackers.end())
  {
    if(it->first.getState() == KalmanFilter3d::DIE)
    {
      ROS_DEBUG("remove tracker %d", it->first.id());
      it = g_hand_trackers.erase(it);      
    }
    else
    {
      it++;      
    }
  }

  //prepare markers if needed
  if(g_marker_array_pub.getNumSubscribers() != 0)
  {
    Marker marker;
    marker.type = Marker::LINE_STRIP;
    marker.lifetime = ros::Duration(0.1);
    marker.header.frame_id = msg->header.frame_id;
    marker.scale.x = 0.005;
    marker.pose.position.x = 0;
    marker.pose.position.y = 0;
    marker.pose.position.z = 0;
    marker.pose.orientation.x = 0;
    marker.pose.orientation.y = 0;
    marker.pose.orientation.z = 0;
    marker.pose.orientation.w = 1;
    marker.ns = "point_history";

    for (size_t i = 0; i < g_hand_trackers.size(); i++)
    {
      marker.points.clear();
      if (g_hand_trackers[i].second.size() != 0)
      {
        marker.id = g_marker_id++;
        std::list<geometry_msgs::Point>::iterator it;
        for (it = g_hand_trackers[i].second.begin(); it != g_hand_trackers[i].second.end(); it++)
        {
          marker.points.push_back(*it);
        }
        marker.color.r = 1;
        marker.color.g = 0;
        marker.color.b = 0;
        marker.color.a = 1.0;
        g_marker_array.markers.push_back(marker);
      }
    }
  }


  if(g_arms_pub.getNumSubscribers() != 0)
  {
    arms_msg->header.stamp = ros::Time::now();
    arms_msg->header.frame_id = msg->header.frame_id;
    g_arms_pub.publish(arms_msg);
  }

  if(g_cloud_pub.getNumSubscribers() != 0)
  {
    sensor_msgs::PointCloud2 cloud_out_msg;
    pcl::toROSMsg(*cloud_out, cloud_out_msg);
    cloud_out_msg.header.stamp = ros::Time::now();
    cloud_out_msg.header.frame_id = msg->header.frame_id;
    g_cloud_pub.publish(cloud_out_msg);
  }

  if((g_marker_array_pub.getNumSubscribers() != 0) && (!g_marker_array.markers.empty()))
  {
    g_marker_array_pub.publish(g_marker_array);
  }
}
Example #13
// Called when a thread wants to indicate that it has started.
void threadStarted()
{
	runningLock.lock();
	runningThreads++;
	runningLock.unlock();
}
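threadStarted() and its counterpart threadStopped() (Example #21) maintain a shared running-thread count; a main thread can use the same counter to wait for shutdown. A hedged sketch (waitForAllThreads is an assumed helper, not from the original):

// Sketch: block until every thread that called threadStarted()
// has called threadStopped() (assumed helper).
void waitForAllThreads()
{
	bool busy = true;
	while (busy)
	{
		runningLock.lock();
		busy = (runningThreads > 0);
		runningLock.unlock();
		boost::this_thread::sleep(boost::posix_time::milliseconds(10));
	}
}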
Example #14
void imageCallback(const sensor_msgs::ImageConstPtr & msg){

#ifdef PRINT_ROS_INFO
  ROS_INFO("Got image message.");
#endif

  // get the compressed image, and convert it to Opencv format.
  cv::Mat img;
  try{
    img = cv_bridge::toCvShare(msg, "bgr8")->image;
  }
  catch(cv_bridge::Exception & e){
    ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    return;  // img is empty if the conversion failed
  }

#ifdef PRINT_ROS_INFO
  ROS_INFO("Converting image done.");
#endif  

  //std::cout << "image size = ( " << img.rows << " X " << img.cols << " )." << std::endl;
  //printf("image data address 0x%x\n", img.data);

  if( startTracking ){

    trackerMutex.lock();

#ifdef PRINT_ROS_INFO

    ROS_INFO("Tracker: Reading Frame ... ");
#endif    

    // update the tracking status, and draw the result.
    tracker->readFrame(img);
    
#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: Updating status ... ");
#endif    

    tracker->updateTrackerStatus();

#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: status updated ... ");
    ROS_INFO("Tracker: drawing ... ");
#endif    

    cv::Mat temp;
    img.copyTo(temp);
    tracker->drawTrackers(temp);
    
#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: Publishing ... ");
#endif    

    // republish this image.
    sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", temp).toImageMsg();
    pub.publish(msg);

    // publish to topic -- object_location
    cv::Point2i location = tracker->getWeightedAverageLocation();
    std::stringstream locationStrStream;

    int currentNum = tracker->getSampleNum();
    int numWithHighConfidence = tracker->getNumOfSamplesHasProbLargerThan(PROB_THRESHOD);

    
    float highConfidenceSampleRatio;
    if( currentNum <= 0){
      highConfidenceSampleRatio = 0;
    }else{
      highConfidenceSampleRatio = numWithHighConfidence * 1.0f / currentNum;
    }

    std::cout << "High confidence sample ratio = " << highConfidenceSampleRatio << std::endl; 
    
    if( location.x < 0 || location.y < 0 || highConfidenceSampleRatio <= HIGH_CONFID_NUM_RATIO_THRESHOLD ){
      //locationStrStream << "object_x " << "0" << " , " << "object_y " << "0" << " , ";
      
      locationStrStream << "object_x " << img.cols /2   << ", " << "object_y " << img.rows / 2 << ", ";
      
      // make offsets to the samples:
      
      ROS_INFO("Tracker offset!");
      if( lastMovementDirection == TRACKER_UP)
	tracker->offsetTracker(TRACKER_DOWN);
      else if( lastMovementDirection == TRACKER_DOWN)
	tracker->offsetTracker(TRACKER_UP);
      else if( lastMovementDirection == TRACKER_LEFT)
	tracker->offsetTracker(TRACKER_RIGHT);
      else if( lastMovementDirection == TRACKER_RIGHT)
	tracker->offsetTracker(TRACKER_LEFT);
      
      
    }else{
      // "x 10, y 10, width 360, height 640"
      locationStrStream << "object_x " << location.x << ", " << "object_y " << location.y << ", ";
      lastMovementDirection = -1;
    }

    locationStrStream << "width " << img.cols << ", " << "height " << img.rows << ", ";

    locationStrStream << "direction follow" ;

    std_msgs::String locationMsg;
    locationMsg.data = locationStrStream.str();
    location_pub.publish(locationMsg);
        
    // release the lock
    trackerMutex.unlock();

  }

  else if (! objectSpecified ) {
    // show the image and let the user specify the inital result.

    //std::cout << img.rows << "," << img.cols << std::endl;
    
    cv::Mat tempImage(img.rows, img.cols, img.type());
    img.copyTo(tempImage);

    
    if( trackerMaxSize < 0){
      trackerMaxSize = MIN(img.rows, img.cols) - 1;
    }
    
    ROS_INFO("Drawing tracker ... ");

    cv::rectangle(tempImage, cv::Rect(tempImage.cols / 2 - trackerSize / 2, tempImage.rows / 2 - trackerSize / 2, trackerSize, trackerSize), cv::Scalar(0,0,255));
    
    // republish this image.
    sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", tempImage).toImageMsg();
    pub.publish(msg);

  }
  
  else{

    trackerMutex.lock();
    
    // haven't started tracking, but the initial object is specified.
    // create a tracker;
    if( tracker != NULL) delete tracker;

    tracker = new Tracker2D();

    tracker->setLimit(5 , img.cols - 5, 5 , img.rows - 5);
    tracker->initialize(cv::Point2f(img.cols / 2, img.rows / 2), trackerSize);
    
    // set object feature.
    cv::Mat objImage = img(cv::Rect(img.cols / 2 - trackerSize /2, img.rows / 2 - trackerSize /2, trackerSize, trackerSize));
    
    tracker->setObjectFeature(objImage);
    
    ROS_INFO("Starting Tracking ... " );

    startTracking = true;

    trackerMutex.unlock();
  }
}
Example #15
void threadHandler(char driveLtr)
{
    /* Lock the mutex so we don't try to access the database at the same time */
    gMutex.lock();

    bool authorized = false;

    /* Lock the USB drive */
    UsbOps ops;
    ops.lockUSB(driveLtr);

    /* Query authorized devices */
    Sql sql;
    if (!sql.dbConnect((char*)Paths::getDatabasePath().c_str(), false))
    {
        ErrorLog::logErrorToFile("*CRITICAL*", "Unable to open authorized drives database!");
        ops.ejectUSB();
        gMutex.unlock();
        return;
    }
    std::vector<authedDrive> drvs;
    sql.queryAuthedDrives(&drvs);

    /* Get the serial key of the device */
    UsbKey usbKey;
    UsbKeyhdr hdr;

    ops.unlockUSB();
    usbKey.getUsbKeyHdr(&hdr, driveLtr);
    ops.lockUSB(driveLtr);

    /* Check if the serial exists in the database */
    for (std::vector<authedDrive>::iterator it = drvs.begin(); it != drvs.end(); it++)
    {
        std::cout << it->serial.c_str() << " " << hdr.serialkey.c_str() << std::endl;
        if (it->serial.compare(hdr.serialkey) == 0)
        {
            authorized = true;
            break;
        }
    }

    ops.unlockUSB();

    /* Log media insertion event */

    AccessLog *log = new AccessLog();
    log->createLogStruct(&log->logUSBStruct, driveLtr, (char*)hdr.serialkey.c_str());
    log->logUSBStruct.accepted = authorized;


    /* Get config settings, check if remote SQL is enabled */
    ConfigParser configParser((char*)Paths::getConfigPath().c_str());
    if (configParser.getValue("SQLenabled") == "1")
    {
        /* SQL enabled = true */
        log->logUsbDrive(log->logUSBStruct, true);
    }
    else
        log->logUsbDrive(log->logUSBStruct, false);

    /* If not authorized, eject it! */
    if (!authorized)
    {
        ops.lockUSB(driveLtr);
        ops.ejectUSB();
    }

    /* Check for viruses here... */

    gMutex.unlock();
    delete log;
}
	/** Callback: On recalc local map & publish it */
	void onDoPublish(const ros::TimerEvent& )
	{
		mrpt::utils::CTimeLoggerEntry tle(m_profiler,"onDoPublish");

		// Purge old observations & latch a local copy:
		TListObservations obs;
		{
			mrpt::utils::CTimeLoggerEntry tle(m_profiler,"onDoPublish.removingOld");
			m_hist_obs_mtx.lock();

			// Purge old obs:
			if (!m_hist_obs.empty())
			{
				const double last_time = m_hist_obs.rbegin()->first;
				TListObservations::iterator it_first_valid = m_hist_obs.lower_bound( last_time-m_time_window );
				const size_t nToRemove = std::distance( m_hist_obs.begin(), it_first_valid );
				ROS_DEBUG("[onDoPublish] Removing %u old entries, last_time=%lf",static_cast<unsigned int>(nToRemove), last_time);
				m_hist_obs.erase(m_hist_obs.begin(), it_first_valid);
			}
			// Local copy in this thread:
			obs = m_hist_obs;
			m_hist_obs_mtx.unlock();
		}

		ROS_DEBUG("Building local map with %u observations.",static_cast<unsigned int>(obs.size()));
		if (obs.empty())
			return;

		// Build local map(s):
		// -----------------------------------------------
		m_localmap_pts.clear();
		mrpt::poses::CPose3D curRobotPose;
		{
			mrpt::utils::CTimeLoggerEntry tle2(m_profiler,"onDoPublish.buildLocalMap");

			// Get the latest robot pose in the reference frame (typ: /odom -> /base_link)
			// so we can build the local map RELATIVE to it:
			try {
				tf::StampedTransform tx;
				m_tf_listener.lookupTransform(m_frameid_reference,m_frameid_robot, ros::Time(0), tx);
				mrpt_bridge::convert(tx,curRobotPose);
				ROS_DEBUG("[onDoPublish] Building local map relative to latest robot pose: %s", curRobotPose.asString().c_str() );
			}
			catch (tf::TransformException &ex) {
				ROS_ERROR("%s",ex.what());
				return;
			}

			// For each observation: compute relative robot pose & insert obs into map:
			for (TListObservations::const_iterator it=obs.begin();it!=obs.end();++it)
			{
				const TInfoPerTimeStep & ipt = it->second;

				// Relative pose in the past:
				mrpt::poses::CPose3D relPose(mrpt::poses::UNINITIALIZED_POSE);
				relPose.inverseComposeFrom( ipt.robot_pose, curRobotPose );

				// Insert obs:
				m_localmap_pts.insertObservationPtr(ipt.observation,&relPose);

			} // end for

		}

		// Publish them:
		if (m_pub_local_map_pointcloud.getNumSubscribers()>0)
		{
			sensor_msgs::PointCloudPtr msg_pts = sensor_msgs::PointCloudPtr( new sensor_msgs::PointCloud);
			msg_pts->header.frame_id = m_frameid_robot;
			msg_pts->header.stamp = ros::Time( obs.rbegin()->first );
			mrpt_bridge::point_cloud::mrpt2ros(m_localmap_pts,msg_pts->header, *msg_pts);
			m_pub_local_map_pointcloud.publish(msg_pts);
		}

		// Show gui:
		if (m_show_gui)
		{
			if (!m_gui_win)
			{
				m_gui_win = mrpt::gui::CDisplayWindow3D::Create("LocalObstaclesNode",800,600);
				mrpt::opengl::COpenGLScenePtr &scene = m_gui_win->get3DSceneAndLock();
				scene->insert( mrpt::opengl::CGridPlaneXY::Create() );
				scene->insert( mrpt::opengl::stock_objects::CornerXYZSimple(1.0,4.0) );

				mrpt::opengl::CSetOfObjectsPtr gl_obs = mrpt::opengl::CSetOfObjects::Create();
				gl_obs->setName("obstacles");
				scene->insert( gl_obs );

				mrpt::opengl::CPointCloudPtr gl_pts = mrpt::opengl::CPointCloud::Create();
				gl_pts->setName("points");
				gl_pts->setPointSize(2.0);
				gl_pts->setColor_u8( mrpt::utils::TColor(0x0000ff) );
				scene->insert( gl_pts );

				m_gui_win->unlockAccess3DScene();
			}

			mrpt::opengl::COpenGLScenePtr &scene = m_gui_win->get3DSceneAndLock();
			mrpt::opengl::CSetOfObjectsPtr gl_obs = mrpt::opengl::CSetOfObjectsPtr( scene->getByName("obstacles") );
			ROS_ASSERT(gl_obs.present());
			gl_obs->clear();

			mrpt::opengl::CPointCloudPtr gl_pts = mrpt::opengl::CPointCloudPtr( scene->getByName("points") );

			for (TListObservations::const_iterator it=obs.begin();it!=obs.end();++it)
			{
				const TInfoPerTimeStep & ipt = it->second;
				// Relative pose in the past:
				mrpt::poses::CPose3D relPose(mrpt::poses::UNINITIALIZED_POSE);
				relPose.inverseComposeFrom( ipt.robot_pose, curRobotPose );

				mrpt::opengl::CSetOfObjectsPtr gl_axis = mrpt::opengl::stock_objects::CornerXYZSimple(0.9,2.0);
				gl_axis->setPose(relPose);
				gl_obs->insert(gl_axis);
			} // end for

			gl_pts->loadFromPointsMap(&m_localmap_pts);


			m_gui_win->unlockAccess3DScene();
			m_gui_win->repaint();
		}


	} // onDoPublish
int main (int argc, char** argv)
{
  // --------------------------------------
  // -----Parse Command Line Arguments-----
  // --------------------------------------
  if (pcl::console::find_argument (argc, argv, "-h") >= 0)
  {
    printUsage (argv[0]);
    return 0;
  }
  if (pcl::console::parse (argc, argv, "-d", device_id) >= 0)
    cout << "Using device id \""<<device_id<<"\".\n";
  if (pcl::console::parse (argc, argv, "-r", angular_resolution) >= 0)
    cout << "Setting angular resolution to "<<angular_resolution<<"deg.\n";
  angular_resolution = pcl::deg2rad (angular_resolution);
  if (pcl::console::parse (argc, argv, "-s", support_size) >= 0)
    cout << "Setting support size to "<<support_size<<"m.\n";
  if (pcl::console::parse (argc, argv, "-i", min_interest_value) >= 0)
    cout << "Setting minimum interest value to "<<min_interest_value<<".\n";
  if (pcl::console::parse (argc, argv, "-t", max_no_of_threads) >= 0)
    cout << "Setting maximum number of threads to "<<max_no_of_threads<<".\n";
  
  pcl::visualization::RangeImageVisualizer range_image_widget ("Range Image");
  
  pcl::visualization::PCLVisualizer viewer ("3D Viewer");
  viewer.addCoordinateSystem (1.0f);
  viewer.setBackgroundColor (1, 1, 1);
  
  viewer.initCameraParameters ();
  viewer.camera_.pos[0] = 0.0;
  viewer.camera_.pos[1] = -0.3;
  viewer.camera_.pos[2] = -2.0;
  viewer.camera_.focal[0] = 0.0;
  viewer.camera_.focal[1] = 0.0+viewer.camera_.pos[1];
  viewer.camera_.focal[2] = 1.0;
  viewer.camera_.view[0] = 0.0;
  viewer.camera_.view[1] = -1.0;
  viewer.camera_.view[2] = 0.0;
  viewer.updateCamera ();
  
  openni_wrapper::OpenNIDriver& driver = openni_wrapper::OpenNIDriver::getInstance ();
  if (driver.getNumberDevices () > 0)
  {
    for (unsigned deviceIdx = 0; deviceIdx < driver.getNumberDevices (); ++deviceIdx)
    {
      cout << "Device: " << deviceIdx + 1 << ", vendor: " << driver.getVendorName (deviceIdx)
           << ", product: " << driver.getProductName (deviceIdx) << ", connected: "
           << (int) driver.getBus (deviceIdx) << " @ " << (int) driver.getAddress (deviceIdx)
           << ", serial number: \'" << driver.getSerialNumber (deviceIdx) << "\'\n";
    }
  }
  else
  {
    cout << "\nNo devices connected.\n\n";
    return 1;
  }
  
  pcl::Grabber* interface = new pcl::OpenNIGrabber (device_id);
  EventHelper event_helper;
  
  boost::function<void (const boost::shared_ptr<openni_wrapper::DepthImage>&) > f_depth_image =
    boost::bind (&EventHelper::depth_image_cb, &event_helper, _1);
  boost::signals2::connection c_depth_image = interface->registerCallback (f_depth_image);
  
  cout << "Starting grabber\n";
  interface->start ();
  cout << "Done\n";
  
  boost::shared_ptr<pcl::RangeImagePlanar> range_image_planar_ptr (new pcl::RangeImagePlanar);
  pcl::RangeImagePlanar& range_image_planar = *range_image_planar_ptr;
  
  pcl::RangeImageBorderExtractor range_image_border_extractor;
  pcl::NarfKeypoint narf_keypoint_detector;
  narf_keypoint_detector.setRangeImageBorderExtractor (&range_image_border_extractor);
  narf_keypoint_detector.getParameters ().support_size = support_size;
  narf_keypoint_detector.getParameters ().max_no_of_threads = max_no_of_threads;
  narf_keypoint_detector.getParameters ().min_interest_value = min_interest_value;
  //narf_keypoint_detector.getParameters ().calculate_sparse_interest_image = false;
  //narf_keypoint_detector.getParameters ().add_points_on_straight_edges = true;
  
  pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints_cloud_ptr (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>& keypoints_cloud = *keypoints_cloud_ptr;
  
  while (!viewer.wasStopped ())
  {
    viewer.spinOnce ();
    range_image_widget.spinOnce ();  // process GUI events
    pcl_sleep (0.01);
    
    bool got_new_range_image = false;
    if (received_new_depth_data && depth_image_mutex.try_lock ())
    {
      received_new_depth_data = false;
      
      //unsigned long time_stamp = depth_image_ptr->getTimeStamp ();
      //int frame_id = depth_image_ptr->getFrameID ();
      const unsigned short* depth_map = depth_image_ptr->getDepthMetaData ().Data ();
      int width = depth_image_ptr->getWidth (), height = depth_image_ptr->getHeight ();
      float center_x = width/2, center_y = height/2;
      float focal_length_x = depth_image_ptr->getFocalLength (), focal_length_y = focal_length_x;
      float original_angular_resolution = asinf (0.5f*float (width)/float (focal_length_x)) / (0.5f*float (width));
      float desired_angular_resolution = angular_resolution;
      range_image_planar.setDepthImage (depth_map, width, height, center_x, center_y,
                                        focal_length_x, focal_length_y, desired_angular_resolution);
      depth_image_mutex.unlock ();
      got_new_range_image = !range_image_planar.points.empty ();
    }
    
    if (!got_new_range_image)
      continue;
    
    // --------------------------------
    // -----Extract NARF keypoints-----
    // --------------------------------
    if (set_unseen_to_max_range)
      range_image_planar.setUnseenToMaxRange ();
    narf_keypoint_detector.setRangeImage (&range_image_planar);
    pcl::PointCloud<int> keypoint_indices;
    double keypoint_extraction_start_time = pcl::getTime();
    narf_keypoint_detector.compute (keypoint_indices);
    double keypoint_extraction_time = pcl::getTime()-keypoint_extraction_start_time;
    std::cout << "Found "<<keypoint_indices.points.size ()<<" key points. "
              << "This took "<<1000.0*keypoint_extraction_time<<"ms.\n";
    
    // ----------------------------------------------
    // -----Show keypoints in range image widget-----
    // ----------------------------------------------
    range_image_widget.showRangeImage (range_image_planar, 0.5f, 10.0f);
    //for (size_t i=0; i<keypoint_indices.points.size (); ++i)
      //range_image_widget.markPoint (keypoint_indices.points[i]%range_image_planar.width,
                                    //keypoint_indices.points[i]/range_image_planar.width,
                                    //pcl::visualization::Vector3ub (0,255,0));
    
    // -------------------------------------
    // -----Show keypoints in 3D viewer-----
    // -------------------------------------
    pcl::visualization::PointCloudColorHandlerCustom<pcl::PointWithRange> color_handler_cloud
      (range_image_planar_ptr, 0, 0, 0);
    if (!viewer.updatePointCloud<pcl::PointWithRange> (range_image_planar_ptr, color_handler_cloud, "range image"))
      viewer.addPointCloud<pcl::PointWithRange> (range_image_planar_ptr, color_handler_cloud, "range image");
    
    keypoints_cloud.points.resize (keypoint_indices.points.size ());
    for (size_t i=0; i<keypoint_indices.points.size (); ++i)
      keypoints_cloud.points[i].getVector3fMap () =
        range_image_planar.points[keypoint_indices.points[i]].getVector3fMap ();
    pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZ> color_handler_keypoints
      (keypoints_cloud_ptr, 0, 255, 0);
    if (!viewer.updatePointCloud (keypoints_cloud_ptr, color_handler_keypoints, "keypoints"))
      viewer.addPointCloud (keypoints_cloud_ptr, color_handler_keypoints, "keypoints");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 7, "keypoints");
  }

  interface->stop ();
}
Example #18
static Result execute_testcase (struct InfoForExecutor &ie)
{
	pthread_t tid = pthread_self ();
	LoggerPtr logger = Logger :: getLogger (HARNESS_LOGGER_NAME);

    string tid_str = iTos ((uint64_t)tid);
    string worker_tag = LOGGER_TAG_WORKER;
	if (ie.selftesting == false)
		worker_tag = worker_tag + '[' + tid_str + ']';
	else
		worker_tag = worker_tag + "[]";

	LogString saved_context;
    LOGGER_PUSH_NDCTAG (worker_tag);

	int retValue;
	Result result = RESULT_SYSTEM_EXCEPTION;
	string result_str = "FAILED";
	long int sTime = 0, eTime = 0;
	string failureReason("Test case execution failed, Check log file.");

	scidbtestharness::executors::ExecutorFactory f;
	scidbtestharness :: executors :: Executor *caseexecutor = 0;
	try
	{
		prepare_filepaths (ie, true);

		string actualrfile_backup = ie.actual_rfile + ".bak";
		string difffile_backup = ie.diff_file + ".bak";
		string logfile_backup = ie.log_file + ".bak";
		if (ie.keepPreviousRun)
		{
			/* rename all the files with extra extension .bak */
			if (Is_regular (ie.actual_rfile))
			{
				bfs::remove (actualrfile_backup);	
				bfs::rename (ie.actual_rfile, actualrfile_backup);	
			}
			if (Is_regular (ie.diff_file))
			{
				bfs::remove (difffile_backup);	
				bfs::rename (ie.diff_file, difffile_backup);	
			}
			if (Is_regular (ie.log_file))
			{
				bfs::remove (logfile_backup);	
				bfs::rename (ie.log_file, logfile_backup);	
			}
		}
		else
		{
			/* remove actual files as well as backup files */
			bfs::remove (ie.actual_rfile);
			bfs::remove (ie.diff_file);
			bfs::remove (ie.log_file);
			bfs::remove (actualrfile_backup);	
			bfs::remove (difffile_backup);	
			bfs::remove (logfile_backup);	
		}

		if ((caseexecutor = f.getExecutor (g_executor_type)) == NULL)
		{
			throw ConfigError (FILE_LINE_FUNCTION, ERR_CONFIG_INVALID_EXECUTOR_TYPE);
		}
		sTime = time (0);
		ie.logger_name = tid_str;

		complete_es_mutex.lock();   /* lock */
		g_test_count++;
		ie.test_sequence_number = g_test_count;
		ie.tid = tid;
		ie.testID = converttoid (ie.rootDir, ie.tcfile);
		complete_es_mutex.unlock(); /* unlock */

		/* Test case execution by the executor.
		 * All exceptions from 'defaultexecutor' are now handled locally by it and
		 * only a SUCCESS/FAILURE return code is returned, which resolves the crash
		 * during pthread_mutex_destroy() at the end of harness execution. */
		retValue = caseexecutor->execute (ie);
		eTime = time (0);
		delete caseexecutor;
		caseexecutor = 0;

		LOG4CXX_DEBUG (logger, "Executor returned : " << (retValue == SUCCESS ? "SUCCESS" : "FAILURE"));
		/* lock */
		complete_es_mutex.lock();

		if (retValue == SUCCESS)
		{
			if (ie.record)                 // PASS
			{
				bfs::remove (ie.expected_rfile);	
				bfs::rename (ie.actual_rfile, ie.expected_rfile);	
				result = RESULT_RECORDED;
				result_str = "RECORDED";
				failureReason = "";
				complete_es.testcasesPassed++;
			}
			else
			{
				int rv;
				LOG4CXX_DEBUG (logger, "Going to compare the files now.");
				if ((rv = diff (ie.expected_rfile, ie.actual_rfile, ie.diff_file)) == DIFF_FILES_MATCH)   // PASS
				{
					LOG4CXX_DEBUG (logger, "Files Match");
					result = RESULT_PASS;
					result_str = "PASS";
					failureReason = "";
					complete_es.testcasesPassed++;
				}
				else if (rv == DIFF_FILES_DIFFER)       // FAIL
				{
					LOG4CXX_DEBUG (logger, "Files Differ");
					result = RESULT_FILES_DIFFER;
					result_str = "FILES_DIFFER";
					failureReason = "Expected output and Actual Output differ. Check .diff file.";
					complete_es.testcasesFailed++;
				}
				else                          // SYSTEM_EXCEPTION
				{
					LOG4CXX_DEBUG (logger, "Either \"diff\" command failed or some other problem");
					result = RESULT_SYSTEM_EXCEPTION;
					result_str = "DIFF_COMMAND_FAILED";
					failureReason = "Either \"diff\" command failed or some other problem";
					complete_es.testcasesFailed++;
				}
			}
		}
		else                                   // ANY EXCEPTION, error
		{
		    /* ANY EXCEPTION, error because of which executor failed to execute the test case
 			 * primarily because errors like, problem in opening file, .test file parsing error, failure in connecting to scidb etc.*/
			result = RESULT_SYSTEM_EXCEPTION;
			result_str = "EXECUTOR_FAILED";
			LOG4CXX_ERROR (logger, "Test case execution failed. Canceling further execution of this test case. Check respective log file.");
			failureReason = "Test case execution failed. Check log file.";
			complete_es.testcasesFailed++;
		}
	} // END try

	catch (harnessexceptions :: SystemError &e)// SYSTEM_EXCEPTION
	{
		/* errors like, .test file does not exists or could not be opened etc.*/
		eTime = time (0);
		LOG4CXX_ERROR (logger, e.what ());
		result = RESULT_SYSTEM_EXCEPTION;
		result_str = "FAILED_ON_EXCEPTION";
		LOG4CXX_ERROR (logger, "Worker failed to execute the job completely.");
		failureReason = "Worker failed to execute the job completely. Check log file.";
		complete_es.testcasesFailed++;
	}

# if 0
	catch (harnessexceptions :: ConfigError &e)// CONFIG_EXCEPTION
	{
		/* TODO : this catch() is not required now as all the exceptions from 'defaultexecutor'
 		 * are being locally handled by it and only return code SUCCESS/FAILURE is only being returned.*/
		/* errors like, connection string is not given or port number is not valid etc.*/
		eTime = time (0);
		LOG4CXX_ERROR (logger, e.what ());
		result = RESULT_CONFIG_EXCEPTION;
		result_str = "FAILED_ON_EXCEPTION";
		LOG4CXX_ERROR (logger, "Worker failed to execute the job completely.");
		failureReason = "Worker failed to execute the job completely. Check log file.";
		complete_es.testcasesFailed++;
	}

	catch (harnessexceptions :: ExecutorError &e)// SYSTEM_EXCEPTION
	{
		/* TODO : this catch() is not required now as all the exceptions from 'defaultexecutor'
 		 * are being locally handled by it and only return code SUCCESS/FAILURE is only being returned.*/
		/* most of the exceptions thrown by CAPIs */
		eTime = time (0);
		if (caseexecutor)
		{
			delete caseexecutor;
			caseexecutor = 0;
		}
		LOG4CXX_ERROR (logger, e.what ());
		result = RESULT_SYSTEM_EXCEPTION;
		result_str = "FAILED_ON_EXECUTOR_EXCEPTION";
		LOG4CXX_ERROR (logger, "Test case execution failed because of some Executor exception. Canceling further execution of this test case."
                               " Check respective log file.");
		failureReason = "Test case execution failed because of some Executor exception. Check log file.";
		complete_es.testcasesFailed++;
	}
# endif

	g_test_ei.testID = ie.testID;
	g_test_ei.description = "";
	g_test_ei.sTime = sTime;
	g_test_ei.eTime = eTime;
	g_test_ei.result = result;
	g_test_ei.failureReason = failureReason;

	if (ie.expected_rfile.length() > 0 && !bfs::exists (ie.expected_rfile))
		ie.expected_rfile = "";
	if (ie.actual_rfile.length() > 0 && !bfs::exists (ie.actual_rfile))
		ie.actual_rfile = "";
	if (ie.timerfile.length() > 0 && !bfs::exists (ie.timerfile))
		ie.timerfile = "";
	if (ie.diff_file.length() > 0 && !bfs::exists (ie.diff_file))
		ie.diff_file = "";
	if (ie.log_file.length() > 0 && !bfs::exists (ie.log_file))
		ie.log_file = "";

	time_t rawtime;
	time ( &rawtime );
	std::string _ctime = ctime (&rawtime);
	_ctime = _ctime.substr(0,_ctime.size()-1);

	/* throw this line on console only if other log is going to the harness.log file */
 	if (strcasecmp (ie.logDestination.c_str (), LOGDESTINATION_CONSOLE) != 0)
		cout << "[" << ie.test_sequence_number << "][" << _ctime << "]: " << ie.testID << " .............................................................. " << result_str << endl << std::flush;
	else
		LOG4CXX_INFO (logger, ie.testID << " .............................................................. " << result_str);

	IndividualTestInfo ti(ie, g_test_ei);
	g_rptr->writeTestcaseExecutionInfo (ti);
	g_rptr->writeIntermediateRunStat (complete_es.testcasesPassed, complete_es.testcasesFailed);

	/* unlock */
	complete_es_mutex.unlock();
	LOGGER_POP_NDCTAG;

	return result;
}
Example #19
    void Run()
    {   //cout << "here0" << endl;
        int w, h;

        // Create camera instances
        _cam = CLEyeCreateCamera(_cameraGUID, _mode, _resolution, _fps);
        if(_cam == NULL)	return;

        // Get camera frame dimensions
        CLEyeCameraGetFrameDimensions(_cam, w, h);

        IplImage *pCapImage;

        // Create the OpenCV images
        pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, _isColor ? 4 : 1);

        // Set some camera parameters
        CLEyeSetCameraParameter(_cam, CLEYE_GAIN, 0);
        CLEyeSetCameraParameter(_cam, CLEYE_EXPOSURE, 511);

        // Start capturing
        CLEyeCameraStart(_cam);
        Sleep(100);

        tMin = 1000;
        tMax = 0;
        tAvg = 0;
        curr = 0;
        measuredCnt = 0;
        printf("\n");

        //cout << "here1" << endl;

        // Capture camera images
        PBYTE tempBuffer;
        cvGetImageRawData(pCapImage, &tempBuffer);
        CLEyeCameraGetFrame(_cam, tempBuffer);
        Mat imgTemp(pCapImage);

        mtx.lock();
        imgLines = Mat::zeros( imgTemp.size(), CV_8UC4 );
        imgCapture = Mat::zeros( imgTemp.size(), CV_8UC4 );
        mtx.unlock();

        //cout << "here2" << endl;

        // image capturing loop
        while(_running)
        {
            //oldFrameTimestamp = frameTimestamp;
            //oldFrameTimestamp = getTickCount();
            //cout << "here3" << endl;

            PBYTE pCapBuffer;

            // Capture camera images
            cvGetImageRawData(pCapImage, &pCapBuffer);
            CLEyeCameraGetFrame(_cam, pCapBuffer);

            //IplImage* imgHSV = cvCreateImage(cvGetSize(pCapImage), IPL_DEPTH_8U, 3);
            //cvCvtColor(pCapImage, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV
            //IplImage* imgThresholded = cvCreateImage(cvGetSize(imgHSV),IPL_DEPTH_8U, 1);
            //cvInRangeS(imgHSV, cvScalar(minH,minS,minV), cvScalar(maxH,maxS,maxV), imgThresholded);

            Mat imgCap(pCapImage);		//creates 8UC4 Mat

            mtx.lock();                 // was try_lock() with the result ignored, which
                                        // could copy (and unlock) without holding the lock
            imgCap.copyTo(imgCapture);	//copy to shared variable
            mtx.unlock();

            //frameTimestamp = getTickCount();
            //double t = ((double)(frameTimestamp - oldFrameTimestamp))/getTickFrequency() * 1000;
            //cout << "Frame interval: " << t << endl;
        }
        Sleep(1000);
        // Stop camera capture
        CLEyeCameraStop(_cam);
        // Destroy camera object
        CLEyeDestroyCamera(_cam);
        // Destroy the allocated OpenCV image
        cvReleaseImage(&pCapImage);
        _cam = NULL;
    }
// IMPORTANT:  We need to receive the point clouds in local coordinates (scanner or robot)
void Mapper::processCloud(unique_ptr<DP> newPointCloud, const std::string& scannerFrame, const ros::Time& stamp, uint32_t seq)
{
	// if the future has completed, use the new map
	processNewMapIfAvailable(); // This call locks the tf publication
	cerr << "received new map" << endl;
	
	timer t;

	processingNewCloud = true;
	BoolSetter stopProcessingSetter(processingNewCloud, false);
	
	
	// Convert point cloud
	const size_t goodCount(newPointCloud->features.cols());
	if (goodCount == 0)
	{
		ROS_ERROR("I found no good points in the cloud");
		return;
	}
	
	// Dimension of the point cloud, important since we handle 2D and 3D
	const int dimp1(newPointCloud->features.rows());

	if(!(newPointCloud->descriptorExists("stamps_Msec") && newPointCloud->descriptorExists("stamps_sec") && newPointCloud->descriptorExists("stamps_nsec")))
	{
		const float Msec = round(stamp.sec/1e6);
		const float sec = round(stamp.sec - Msec*1e6);
		const float nsec = round(stamp.nsec);

		const PM::Matrix desc_Msec = PM::Matrix::Constant(1, goodCount, Msec);
		const PM::Matrix desc_sec = PM::Matrix::Constant(1, goodCount, sec);
		const PM::Matrix desc_nsec = PM::Matrix::Constant(1, goodCount, nsec);
		newPointCloud->addDescriptor("stamps_Msec", desc_Msec);
		newPointCloud->addDescriptor("stamps_sec", desc_sec);
		newPointCloud->addDescriptor("stamps_nsec", desc_nsec);

		cout << "Adding time" << endl;
		
	}

	ROS_INFO("Processing new point cloud");
	{
		timer t; // Time how long the algorithm takes
		
		// Apply filters to incoming cloud, in scanner coordinates
		inputFilters.apply(*newPointCloud);
    
    
		ROS_INFO_STREAM("Input filters took " << t.elapsed() << " [s]");
	}
	
	string reason;
	// Initialize the transformation to identity if empty
 	if(!icp.hasMap())
 	{
		// we need to know the dimensionality of the point cloud to initialize properly
		publishLock.lock();
		TOdomToMap = PM::TransformationParameters::Identity(dimp1, dimp1);
		TOdomToMap(2,3) = mapElevation;
		publishLock.unlock();
	}

 
	// Fetch transformation from scanner to odom
	// Note: we don't need to wait for transform. It is already called in transformListenerToEigenMatrix()
	PM::TransformationParameters TOdomToScanner;
	try
	{
		TOdomToScanner = PointMatcher_ros::eigenMatrixToDim<float>(
				PointMatcher_ros::transformListenerToEigenMatrix<float>(
				tfListener,
				scannerFrame,
				odomFrame,
				stamp
			), dimp1);
	}
	catch(const tf::ExtrapolationException& e)
	{
		ROS_ERROR_STREAM("Extrapolation Exception. stamp = "<< stamp << " now = " << ros::Time::now() << " delta = " << ros::Time::now() - stamp << endl << e.what() );
		return;
	}


	ROS_DEBUG_STREAM("TOdomToScanner(" << odomFrame<< " to " << scannerFrame << "):\n" << TOdomToScanner);
	ROS_DEBUG_STREAM("TOdomToMap(" << odomFrame<< " to " << mapFrame << "):\n" << TOdomToMap);
		
	const PM::TransformationParameters TscannerToMap = TOdomToMap * TOdomToScanner.inverse();
	ROS_DEBUG_STREAM("TscannerToMap (" << scannerFrame << " to " << mapFrame << "):\n" << TscannerToMap);
	
	// Ensure a minimum number of points after filtering
	const int ptsCount = newPointCloud->features.cols();
	if(ptsCount < minReadingPointCount)
	{
		ROS_ERROR_STREAM("Not enough points in newPointCloud: only " << ptsCount << " pts.");
		return;
	}

	// Initialize the map if empty
 	if(!icp.hasMap())
 	{
		ROS_INFO_STREAM("Creating an initial map");
		mapCreationTime = stamp;
		setMap(updateMap(newPointCloud.release(), TscannerToMap, false));
		// we must not delete newPointCloud because we just stored it in the mapPointCloud
		return;
	}
	
	// Check dimension
	if (newPointCloud->features.rows() != icp.getInternalMap().features.rows())
	{
		ROS_ERROR_STREAM("Dimensionality missmatch: incoming cloud is " << newPointCloud->features.rows()-1 << " while map is " << icp.getInternalMap().features.rows()-1);
		return;
	}
	
	try 
	{
		// Apply ICP
		PM::TransformationParameters Ticp;
		Ticp = icp(*newPointCloud, TscannerToMap);
    Ticp = transformation->correctParameters(Ticp);

    // extract corrections
    PM::TransformationParameters Tdelta = Ticp * TscannerToMap.inverse();
     
		// ISER
		//{
    //  // remove roll and pitch
    //  Tdelta(2,0) = 0; 
    //  Tdelta(2,1) = 0; 
    //  Tdelta(2,2) = 1; 
    //  Tdelta(0,2) = 0; 
    //  Tdelta(1,2) = 0;
    //  Tdelta(2,3) = 0; //z
    //  Tdelta.block(0,0,3,3) = transformation->correctParameters(Tdelta.block(0,0,3,3));

    //  Ticp = Tdelta*TscannerToMap;

    //  ROS_DEBUG_STREAM("Ticp:\n" << Ticp);
		//}

		
		// Ensure minimum overlap between scans
		const double estimatedOverlap = icp.errorMinimizer->getOverlap();
		ROS_INFO_STREAM("Overlap: " << estimatedOverlap);
		if (estimatedOverlap < minOverlap)
		{
			ROS_ERROR_STREAM("Estimated overlap too small, ignoring ICP correction!");
			return;
		}

    // Compute tf
		publishStamp = stamp;
		publishLock.lock();
		TOdomToMap = Ticp * TOdomToScanner;

    PM::TransformationParameters Terror = TscannerToMap.inverse() * Ticp;

    cerr << "Correcting translation error of " << Terror.block(0,3, 3,1).norm() << " m" << endl;

    // Add transformation to path
    path.push_back(Ticp);
    errors.push_back(Terror);

		// Publish tf
		if(publishMapTf == true)
		{
			tfBroadcaster.sendTransform(PointMatcher_ros::eigenMatrixToStampedTransform<float>(TOdomToMap, mapFrame, odomFrame, stamp));
		}

		publishLock.unlock();
		processingNewCloud = false;
		
		ROS_DEBUG_STREAM("TOdomToMap:\n" << TOdomToMap);

		// Publish odometry
		if (odomPub.getNumSubscribers())
			odomPub.publish(PointMatcher_ros::eigenMatrixToOdomMsg<float>(Tdelta, mapFrame, stamp));
	
    // TODO: check that, might be wrong....
		// Publish error on odometry
		if (odomErrorPub.getNumSubscribers())
			odomErrorPub.publish(PointMatcher_ros::eigenMatrixToOdomMsg<float>(TOdomToMap, mapFrame, stamp));

		// ***Debug:
    //debugPub.publish(PointMatcher_ros::pointMatcherCloudToRosMsg<float>(transformation->compute(*newPointCloud, Ticp), mapFrame, mapCreationTime));
		// ***


		// check if new points should be added to the map
		if (
			mapping &&
			((estimatedOverlap < maxOverlapToMerge) || (icp.getInternalMap().features.cols() < minMapPointCount)) &&
			(!mapBuildingInProgress)
    )
		{
			cout << "map Creation..." << endl;
			// make sure we process the last available map
			mapCreationTime = stamp;
			ROS_INFO("Adding new points to the map in background");
			mapBuildingTask = MapBuildingTask(boost::bind(&Mapper::updateMap, this, newPointCloud.release(), Ticp, true));
			mapBuildingFuture = mapBuildingTask.get_future();
			mapBuildingThread = boost::thread(boost::move(boost::ref(mapBuildingTask)));
			mapBuildingInProgress = true;
    }
	}
	catch (const PM::ConvergenceError& error)
	{
		ROS_ERROR_STREAM("ICP failed to converge: " << error.what());
		return;
	}
	
	//Statistics about time and real-time capability
	int realTimeRatio = 100*t.elapsed() / (stamp.toSec()-lastPoinCloudTime.toSec());
	ROS_INFO_STREAM("[TIME] Total ICP took: " << t.elapsed() << " [s]");
	if(realTimeRatio < 80)
		ROS_INFO_STREAM("[TIME] Real-time capability: " << realTimeRatio << "%");
	else
		ROS_WARN_STREAM("[TIME] Real-time capability: " << realTimeRatio << "%");

	lastPoinCloudTime = stamp;
}
Example #21
// Called when a thread wants to indicate that it has stopped.
void threadStopped()
{
	runningLock.lock();
	runningThreads--;
	runningLock.unlock();
}
Example #22
	void lock()
	{
		mutex.lock();
	}
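A lock() forwarder like this usually has a matching unlock(), which also lets callers use a scoped guard instead of manual pairing; a hedged sketch (the unlock() counterpart is assumed, not shown in the excerpt):

	// Sketch: the presumed matching forwarder.
	void unlock()
	{
		mutex.unlock();
	}
	// With both forwarders, callers can rely on RAII instead of manual calls,
	// e.g. boost::lock_guard<OwningClass> guard(obj);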
Example #23
namespace pei
{
    static boost::mutex sMutex;
    static bool isInitialized(false);

    //////////////////////////////////////////////////////////////////////////
    //

    TtFontAsset::TtFontAsset( const char* name, size_t size )
        : FontAsset( name, size )
        , m_FontName(name)
        , m_FontSize(size)
    {
        sMutex.lock();
        if ( !isInitialized ) {
            pei::TtFontFactory().Init();
            isInitialized = true;
        }
        sMutex.unlock();
    }

    TtFontAsset::TtFontAsset( const TtFontAsset& fa )
        : FontAsset(fa.m_FontName.c_str(), fa.m_FontSize)
        , m_FontName(fa.m_FontName)
        , m_FontSize(fa.m_FontSize)
        , m_TtFont(fa.m_TtFont)
    {
    }

    TtFontAsset::~TtFontAsset()
    {
    }

    bool TtFontAsset::CanHandle()
    {
        m_TtFont = pei::TtFontFactory().CreateTtFont( m_FontName.c_str(), m_FontSize );
        return m_TtFont != NULL;
    }

    bool TtFontAsset::Open()
    {
        if ( m_TtFont == NULL ) {
            m_TtFont = pei::TtFontFactory().CreateTtFont( m_FontName.c_str(), m_FontSize );
        }
        return m_TtFont != NULL;
    }

    void TtFontAsset::Close()
    {
        m_TtFont = TtFontPtr();
    }

    int TtFontAsset::GetMemoryUsage()
    {
        return 0;
    }

    std::string TtFontAsset::GetFormatString() const
    {
        return ".ttf";
    }

    const char* TtFontAsset::GetResourceDirectory() const
    {
        return "";
    }

    SurfacePtr TtFontAsset::RenderText( const char *text, const pei::Color& color /*= pei::Color(255,255,255)*/ )
    {
#ifdef _HAS_SDL_TTF_
        if ( m_TtFont ) {
            // this is a hack! We swap r & b and correct the color format manually
            // SDL_ttf renders ARGB, but we need ABGR (or LE RGBA)
            SDL_Color c = { color.b, color.g, color.r, color.a };
            SDL_Surface * s = TTF_RenderText_Blended( (TTF_Font*)m_TtFont->GetFontHandle(), text, c );
            if ( s != NULL ) {
                // swap r & b in the format description
                s->format->Rmask = 0x000000ff; s->format->Rshift = 0;
                s->format->Bmask = 0x00ff0000; s->format->Bshift = 16;
                return SurfacePtr( new pei::SDL::SurfaceWrapper(s) );
            }
        }
#endif
        return SurfacePtr();
    }

    void TtFontAsset::RenderText( SurfacePtr dest, int& x, int& y, const char* text, const pei::Color& color /*= pei::XColor(255,255,255)*/ )
    {
        //     assert ( pFont );
        if (!m_TtFont || dest.get() == NULL ) {
            return;
        }
#ifdef _HAS_SDL_TTF_
        SDL_Surface *text_surface = NULL;
        // this is a hack! We swap r & b and correct the color format manually
        // SDL_ttf renders ARGB, but we need ABGR (or LE RGBA) to match RGBA texture format - performance reasons!
        SDL_Color c = { color.b, color.g, color.r, color.a };
        if ( (text_surface = TTF_RenderText_Blended( (TTF_Font*)m_TtFont->GetFontHandle(), text, c )) != NULL )
        {
            // swap r & b in the format description
            text_surface->format->Rmask = 0x000000ff; text_surface->format->Rshift = 0;
            text_surface->format->Bmask = 0x00ff0000; text_surface->format->Bshift = 16;

        	pei::SurfacePtr txt( new pei::SDL::SurfaceWrapper(text_surface));
            pei::Blitter blitter(pei::PixOpPtr(new PixOpAlphaBlitSrcAlpha(txt->GetFormat(),dest->GetFormat()) ) );
            blitter.Blit( txt, dest, x, y, txt->GetWidth(), txt->GetHeight(), 0, 0 );

            int tw, th;
            TTF_SizeText( (TTF_Font*)m_TtFont->GetFontHandle(), text, &tw, &th);

            Uint32 font_height = TTF_FontHeight((TTF_Font*)m_TtFont->GetFontHandle());
            y += font_height;
            x += tw;
        }
#endif
    }

    void TtFontAsset::RenderTextA( SurfacePtr dest, int x, int y, const char* text, const pei::Color& color /*= pei::XColor(255,255,255)*/ )
    {
        //     assert ( pFont );
        if (!m_TtFont || dest.get() == NULL ) {
            return;
        }
#ifdef _HAS_SDL_TTF_
        SDL_Surface *text_surface = NULL;
        // this is a hack! We swap r & b and correct the color format manually
        // SDL_ttf renders ARGB, but we need ABGR (or LE RGBA) to match RGBA texture format - performance reasons!
        SDL_Color c = { color.b, color.g, color.r, color.a };
        if ( (text_surface = TTF_RenderText_Blended( (TTF_Font*)m_TtFont->GetFontHandle(), text, c )) != NULL )
        {
            // swap r & b in the format description
            text_surface->format->Rmask = 0x000000ff; text_surface->format->Rshift = 0;
            text_surface->format->Bmask = 0x00ff0000; text_surface->format->Bshift = 16;

            pei::SurfacePtr txt( new pei::SDL::SurfaceWrapper(text_surface));
            pei::Blitter blitter(pei::PixOpPtr(new PixOpAlphaBlitSrcAlpha(txt->GetFormat(),dest->GetFormat()) ) );
            blitter.Blit( txt, dest, x, y, txt->GetWidth(), txt->GetHeight(), 0, 0 );
        }
#endif
    }

    pei::Rectangle TtFontAsset::GetTextDimensions( const char* text ) const
    {
        Rectangle rc;
        if ( m_TtFont ) {
            int w(0),h(0);
#ifdef _HAS_SDL_TTF_
            TTF_SizeText( static_cast<TTF_Font*>(m_TtFont->GetFontHandle()), text, &w, &h);
#endif
            rc.w() = w;
            rc.h() = h;
        }
        return rc;
    }
} // namespace pei
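// The sMutex/isInitialized pair in the TtFontAsset constructor above is a
// hand-rolled once-guard. A minimal alternative sketch with boost::call_once,
// assuming pei::TtFontFactory().Init() may safely run exactly once from any thread:
#include <boost/thread/once.hpp>

static boost::once_flag sFontFactoryInit = BOOST_ONCE_INIT;

static void initFontFactory()
{
    pei::TtFontFactory().Init();
}

// The constructor's whole locked block then collapses to:
//     boost::call_once(sFontFactoryInit, &initFontFactory);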
Example #24
	void unlock()
	{
		mutex.unlock();
	}
Example #25
void callbackConfig(pcl_hand_arm_detector::PclHandArmDetectorConfig &config, uint32_t level)
{
  mutex_config.lock();
  g_config = config;
  mutex_config.unlock();
}
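// callbackConfig publishes the new parameters under mutex_config; any consumer
// must take the same lock. A minimal sketch of the reading side (copy-out
// pattern; getConfigCopy is hypothetical, g_config/mutex_config as above, and
// mutex_config is assumed to be a boost::mutex):
pcl_hand_arm_detector::PclHandArmDetectorConfig getConfigCopy()
{
  boost::mutex::scoped_lock lock(mutex_config); // released on return
  return g_config;
}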
Example #26
// Used for keeping track on things that use multithreading
void reportReady2()
{
    mutex_lock.lock();
    threads_ready2++;
    mutex_lock.unlock();
}
Example #27
int main (int argc, char** argv)
{
  if(pcl::console::find_switch (argc, argv, "--help") || pcl::console::find_switch (argc, argv, "-h"))
        return print_help();

  // Algorithm parameters:
  std::string svm_filename = "../../people/data/trainedLinearSVMForPeopleDetectionWithHOG.yaml";
  float min_confidence = -1.5;
  float min_height = 1.3;
  float max_height = 2.3;
  float voxel_size = 0.06;
  Eigen::Matrix3f rgb_intrinsics_matrix;
  rgb_intrinsics_matrix << 525, 0.0, 319.5, 0.0, 525, 239.5, 0.0, 0.0, 1.0; // Kinect RGB camera intrinsics

  // Read if some parameters are passed from command line:
  pcl::console::parse_argument (argc, argv, "--svm", svm_filename);
  pcl::console::parse_argument (argc, argv, "--conf", min_confidence);
  pcl::console::parse_argument (argc, argv, "--min_h", min_height);
  pcl::console::parse_argument (argc, argv, "--max_h", max_height);

  // Read Kinect live stream:
  PointCloudT::Ptr cloud (new PointCloudT);
  bool new_cloud_available_flag = false;
  pcl::Grabber* interface = new pcl::OpenNIGrabber();
  boost::function<void (const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr&)> f =
      boost::bind (&cloud_cb_, _1, cloud, &new_cloud_available_flag);
  interface->registerCallback (f);
  interface->start ();

  // Wait for the first frame:
  while(!new_cloud_available_flag) 
    boost::this_thread::sleep(boost::posix_time::milliseconds(1));
  new_cloud_available_flag = false;

  cloud_mutex.lock ();    // for not overwriting the point cloud

  // Display pointcloud:
  pcl::visualization::PointCloudColorHandlerRGBField<PointT> rgb(cloud);
  viewer.addPointCloud<PointT> (cloud, rgb, "input_cloud");
  viewer.setCameraPosition(0,0,-2,0,-1,0,0);

  // Add point picking callback to viewer:
  struct callback_args cb_args;
  PointCloudT::Ptr clicked_points_3d (new PointCloudT);
  cb_args.clicked_points_3d = clicked_points_3d;
  cb_args.viewerPtr = pcl::visualization::PCLVisualizer::Ptr(&viewer);
  viewer.registerPointPickingCallback (pp_callback, (void*)&cb_args);
  std::cout << "Shift+click on three floor points, then press 'Q'..." << std::endl;

  // Spin until 'Q' is pressed:
  viewer.spin();
  std::cout << "done." << std::endl;
  
  cloud_mutex.unlock ();    

  // Ground plane estimation:
  Eigen::VectorXf ground_coeffs;
  ground_coeffs.resize(4);
  std::vector<int> clicked_points_indices;
  for (unsigned int i = 0; i < clicked_points_3d->points.size(); i++)
    clicked_points_indices.push_back(i);
  pcl::SampleConsensusModelPlane<PointT> model_plane(clicked_points_3d);
  model_plane.computeModelCoefficients(clicked_points_indices,ground_coeffs);
  std::cout << "Ground plane: " << ground_coeffs(0) << " " << ground_coeffs(1) << " " << ground_coeffs(2) << " " << ground_coeffs(3) << std::endl;

  // Initialize new viewer:
  pcl::visualization::PCLVisualizer viewer("PCL Viewer");          // viewer initialization
  viewer.setCameraPosition(0,0,-2,0,-1,0,0);

  // Create classifier for people detection:  
  pcl::people::PersonClassifier<pcl::RGB> person_classifier;
  person_classifier.loadSVMFromFile(svm_filename);   // load trained SVM

  // People detection app initialization:
  pcl::people::GroundBasedPeopleDetectionApp<PointT> people_detector;    // people detection object
  people_detector.setVoxelSize(voxel_size);                        // set the voxel size
  people_detector.setIntrinsics(rgb_intrinsics_matrix);            // set RGB camera intrinsic parameters
  people_detector.setClassifier(person_classifier);                // set person classifier
  people_detector.setHeightLimits(min_height, max_height);         // set person classifier
//  people_detector.setSensorPortraitOrientation(true);             // set sensor orientation to vertical

  // For timing:
  static unsigned count = 0;
  static double last = pcl::getTime ();

  // Main loop:
  while (!viewer.wasStopped())
  {
    if (new_cloud_available_flag && cloud_mutex.try_lock ())    // if a new cloud is available
    {
      new_cloud_available_flag = false;

      // Perform people detection on the new cloud:
      std::vector<pcl::people::PersonCluster<PointT> > clusters;   // vector containing persons clusters
      people_detector.setInputCloud(cloud);
      people_detector.setGround(ground_coeffs);                    // set floor coefficients
      people_detector.compute(clusters);                           // perform people detection

      ground_coeffs = people_detector.getGround();                 // get updated floor coefficients

      // Draw cloud and people bounding boxes in the viewer:
      viewer.removeAllPointClouds();
      viewer.removeAllShapes();
      pcl::visualization::PointCloudColorHandlerRGBField<PointT> rgb(cloud);
      viewer.addPointCloud<PointT> (cloud, rgb, "input_cloud");
      unsigned int k = 0;
      for(std::vector<pcl::people::PersonCluster<PointT> >::iterator it = clusters.begin(); it != clusters.end(); ++it)
      {
        if(it->getPersonConfidence() > min_confidence)             // draw only people with confidence above a threshold
        {
          // draw theoretical person bounding box in the PCL viewer:
          it->drawTBoundingBox(viewer, k);
          k++;
        }
      }
      std::cout << k << " people found" << std::endl;
      viewer.spinOnce();

      // Display average framerate:
      if (++count == 30)
      {
        double now = pcl::getTime ();
        std::cout << "Average framerate: " << double(count)/double(now - last) << " Hz" <<  std::endl;
        count = 0;
        last = now;
      }
      cloud_mutex.unlock ();
    }
  }

  return 0;
}
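// ground_coeffs above is the plane (a, b, c, d) of a*x + b*y + c*z + d = 0;
// computeModelCoefficients returns the normal scaled to unit length, so
// evaluating the plane at a point gives its signed height over the floor.
// heightAbovePlane is a hypothetical helper (PointT as in the snippet):
float heightAbovePlane(const Eigen::VectorXf& g, const PointT& p)
{
  return g(0) * p.x + g(1) * p.y + g(2) * p.z + g(3);
}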
Example #28
	virtual bool on_expose_event(GdkEventExpose* )
	{
		Glib::RefPtr<Gdk::Drawable> drawable = get_window();

		vc.get_frame(pixbuf);

		drawable->draw_pixbuf(get_style()->get_bg_gc(Gtk::STATE_NORMAL),
				pixbuf->scale_simple(get_width(), get_height(), Gdk::INTERP_BILINEAR),
				0, 0, 0, 0, -1 , -1, Gdk::RGB_DITHER_NONE, 0, 0);

		Gdk::Point rect[4], barcode_center;
		string str;

		bool res = decode_barcode(pixbuf, rect, str, 100);

		if (res)
		{
			Cairo::RefPtr<Cairo::Context> cr = drawable->create_cairo_context();

			cr->set_line_width(3);
			cr->set_source_rgb(1, 0.0, 0.0);

			cr->move_to(rect[0].get_x(), rect[0].get_y() );
			cr->line_to(rect[1].get_x(), rect[1].get_y() );
			cr->line_to(rect[2].get_x(), rect[2].get_y() );
			cr->line_to(rect[3].get_x(), rect[3].get_y() );
			cr->line_to(rect[0].get_x(), rect[0].get_y() );

			cr->stroke();

			BarcodePropertyEstimation distance(get_width(), get_height(),
					rect[0].get_x(), rect[0].get_y(),
					rect[1].get_x(), rect[1].get_y(),
					rect[2].get_x(), rect[2].get_y(),
					rect[3].get_x(), rect[3].get_y());


			Glib::RefPtr<Pango::Layout> pango_layout = Pango::Layout::create(cr);

			Pango::FontDescription desc("Arial Rounded MT Bold");
			desc.set_size(14 * PANGO_SCALE);
			pango_layout->set_font_description(desc);
			pango_layout->set_alignment(Pango::ALIGN_CENTER);

			double dir = distance.calcDirection();
			string dir_str;

			if (dir > 2.3)
				dir_str = "right";
			else if (dir < -2.3)
				dir_str = "left";
			else
				dir_str = "front";

			double dist = distance.calcDistance();
			int cents = int(dist * 100 + 0.5); // round to two decimals
			string text = "Object ID: " + str +
					"\nDistance: " + lexical_cast<string>(cents / 100) + "." +
					(cents % 100 < 10 ? "0" : "") + lexical_cast<string>(cents % 100) +
					"\nDirection: " + dir_str;

			pango_layout->set_text(text);

			Gdk::Point* max_x, *min_x, *max_y, *min_y;
			boost::function<bool (Gdk::Point, Gdk::Point)> compare;

			compare = bind(less<int>(), bind(&Gdk::Point::get_x, _1), bind(&Gdk::Point::get_x, _2));
			max_x = max_element(rect, rect + 4, compare);
			min_x =	min_element(rect, rect + 4, compare);

			compare = bind(less<int>(), bind(&Gdk::Point::get_y, _1), bind(&Gdk::Point::get_y, _2));
			max_y = max_element(rect, rect + 4, compare);
			min_y =	min_element(rect, rect + 4, compare);

			barcode_center = Gdk::Point(
					(max_x->get_x() + min_x->get_x()) / 2,
					(max_y->get_y() + min_y->get_y()) / 2);

			const Pango::Rectangle extent = pango_layout->get_pixel_logical_extents();

			cr->move_to(get_width() - extent.get_width() - 10,
					get_height() - extent.get_height() - 10);

			pango_layout->show_in_cairo_context(cr);

			DetectedBarcode barcode(str, dir_str, distance.calcDistance());
			vector<DetectedBarcode> vec;
			vec.push_back(barcode);

			ostringstream ostr;
			xmlpp::Document document1;

			listDetectedBarcodesToXML(document1.create_root_node("ObjectList"), vec);

			document1.write_to_stream(ostr, "UTF-8");

			mut.lock();
			strXML = ostr.str();
			mut.unlock();

		}

		return true;
	}
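	// The boost::bind comparators above predate C++11; a lambda expresses the
	// same ordering directly, and std::minmax_element folds the min and max
	// passes into one (xExtremes is a hypothetical helper):
	#include <algorithm>
	#include <utility>

	std::pair<const Gdk::Point*, const Gdk::Point*> xExtremes(const Gdk::Point (&pts)[4])
	{
		return std::minmax_element(pts, pts + 4,
				[](const Gdk::Point& a, const Gdk::Point& b) { return a.get_x() < b.get_x(); });
	}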
Example #29
	void releaseContext() {
		update_mutex.unlock();
	}
Example #30
void free_adjlst(struct adjlst<edge_t> * adjl) {
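    // NB: free() itself is thread-safe in conforming C libraries; this lock
    // presumably guards against other threads still traversing the list.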
    free_mutex.lock();
    free(adjl->edges);
    free(adjl);
    free_mutex.unlock();
}