Example #1
/*
 * call-seq:
 *   image[rect] -> WX::Image or nil
 *   image[x,y] -> WX::Color or nil
 *
 * If given a WX::Rect, returns a sub-image of that area.
 * If given x and y, returns the color at that position, or nil when out of range.
 * ===Arguments
 * * x and y are Integer
 * * rect is a WX::Rect
 * ===Return value
 * WX::Image, WX::Color or nil
 * === Exceptions
 * [ArgumentError]
 * * rect has a negative size
 * * rect does not fit inside the size of the image
*/
DLL_LOCAL VALUE _get(int argc,VALUE *argv,VALUE self)
{
	VALUE vx,vy;
	rb_scan_args(argc, argv, "11",&vx,&vy);
	if(_self->IsOk())
	{
		if(NIL_P(vy)) // only one argument given: vx holds the WX::Rect
			return _getSubImage(self, vx);

		int x,y;
		x = RB_NUM2UINT(vx);
		y = RB_NUM2UINT(vy);

		if(!check_inside(x,y, _self->GetSize()))
			return Qnil;

		unsigned char red,green,blue,alpha;
		red = _self->GetRed(x,y);
		green = _self->GetGreen(x,y);
		blue = _self->GetBlue(x,y);
		if(_self->HasAlpha())
			alpha = _self->GetAlpha(x,y);
		else
			alpha = wxALPHA_OPAQUE;
		return wrap(new wxColor(red,green,blue,alpha));
	}else
		return Qnil;
}
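Examples #1, #3, and #4 all rely on a check_inside(x, y, size) helper from the same wxWidgets bindings that is not shown in this listing. The sketch below is an assumption about what such a helper does (a plain bounds test against a wxSize), not the bindings' actual implementation:

#include <wx/gdicmn.h> // wxSize

// Hypothetical sketch: true when pixel (x, y) lies inside an image of the given size.
inline bool check_inside(int x, int y, const wxSize& size)
{
	return x >= 0 && y >= 0 &&
	       x < size.GetWidth() && y < size.GetHeight();
}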
Example #2
/**
* Checks whether plg2 lies entirely inside plg1 (i.e. every vertex of plg2 is inside plg1)
*/
bool is_inside(Polygon_2 plg1, Polygon_2 plg2) {
    bool ishole = true;
    for (std::vector<Point>::iterator v2 = plg2.vertices_begin();
            v2 != plg2.vertices_end();
            ++v2) {
        if(! check_inside(*v2, plg1, K())) {
            ishole = false;
            break; //stop checking
        }
    }
    return ishole;
}
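The check_inside(*v2, plg1, K()) call above follows CGAL's standard point-in-polygon idiom. Below is a minimal sketch of such an overload built on CGAL::bounded_side_2; the kernel and typedef names are assumptions about this snippet's surroundings, not code taken from it:

#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Polygon_2.h>
#include <CGAL/Polygon_2_algorithms.h>

typedef CGAL::Exact_predicates_inexact_constructions_kernel K;
typedef K::Point_2 Point;
typedef CGAL::Polygon_2<K> Polygon_2;

// Hypothetical sketch: true when pt lies inside plg or on its boundary.
bool check_inside(const Point& pt, const Polygon_2& plg, const K& traits)
{
    switch (CGAL::bounded_side_2(plg.vertices_begin(), plg.vertices_end(), pt, traits)) {
    case CGAL::ON_BOUNDED_SIDE: // strictly inside
    case CGAL::ON_BOUNDARY:     // on an edge or vertex
        return true;
    default:                    // CGAL::ON_UNBOUNDED_SIDE
        return false;
    }
}

Whether a boundary point counts as inside is a design choice; treating it as inside means a hole that touches the outer polygon's edge is not rejected.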
Example #3
void set_at_pos(int x, int y, wxImage* self, VALUE val)
{
	if(!check_inside(x, y, self->GetSize()))
		return;

	if(rb_obj_is_kind_of(val,rb_cWXImage) || rb_obj_is_kind_of(val,rb_cWXBitmap))
	{
		// an image or bitmap is pasted at the given position
		self->Paste(unwrap<wxImage>(val),x,y);
	} else {
		// anything else is treated as a color for the single pixel
		wxColor c(unwrap<wxColor>(val));
		self->SetRGB(x,y,c.Red(),c.Green(),c.Blue());
		if(self->HasAlpha())
			self->SetAlpha(x,y,c.Alpha());
	}
}
Example #4
/*
 * call-seq:
 *   image[x,y]= WX::Color or WX::Image or WX::Bitmap
 *   image[pos]= WX::Color or WX::Image or WX::Bitmap
 *   image[rect]= WX::Color
 *
 * If given x and y or pos together with a color, sets the color at that position.
 * If given x and y or pos together with an image or bitmap, pastes it at that position.
 * If given a WX::Rect, fills that area with the given color.
 * ===Arguments
 * * x and y are Integer
 * * pos is a WX::Point
 * * rect is a WX::Rect
 *
 * === Exceptions
 * [ArgumentError]
 * * rect does not fit inside the size of the image
 */
DLL_LOCAL VALUE _set(int argc,VALUE *argv,VALUE self)
{
	VALUE vx,vy,value;
	rb_scan_args(argc, argv, "21",&vx,&vy,&value);

	rb_check_frozen(self);

	if(_self->IsOk())
	{
		if(NIL_P(value)) { // two arguments: image[rect]= color or image[pos]= value
			if(is_wrapable<wxRect>(vx)) {
				wxColor c(unwrap<wxColor>(vy));
				wxSize size(_self->GetSize());
				wxRect vrect;

				if(check_contain_rect(_GetSize(self), size, vx, vrect))
				{
					_self->SetRGB(vrect,c.Red(),c.Green(),c.Blue());
					if(_self->HasAlpha()) {
						for(int i = vrect.x; i <= vrect.GetRight(); ++i)
							for(int j = vrect.y; j <= vrect.GetBottom(); ++j)
								if(check_inside(i, j, size))
									_self->SetAlpha(i,j,c.Alpha());
					}
				}
			} else {
				wxPoint vpoint(unwrap<wxPoint>(vx));
				set_at_pos(vpoint.x, vpoint.y, _self, vy);
			}
		} else { // three arguments: image[x,y]= value
			set_at_pos(RB_NUM2UINT(vx), RB_NUM2UINT(vy), _self, value);
		}
	}

	return NIL_P(value) ? vy : value; // return the assigned value
}
Example #5
template<class FeatureType, class DataSet, class LabelType, class ExampleIndex, class NodeType>
void pcl::face_detection::FaceDetectorDataProvider<FeatureType, DataSet, LabelType, ExampleIndex, NodeType>::getDatasetAndLabels(DataSet & data_set,
    std::vector<LabelType> & label_data, std::vector<ExampleIndex> & examples)
{
  srand (static_cast<unsigned int>(time (NULL)));
  std::random_shuffle (image_files_.begin (), image_files_.end ());
  std::vector < std::string > files;
  files = image_files_;
  files.resize (std::min (num_images_, static_cast<int> (files.size ())));

  std::vector < TrainingExample > training_examples;
  std::vector<float> labels;

  int total_neg, total_pos;
  total_neg = total_pos = 0;

#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
  pcl::visualization::PCLVisualizer vis("training");
#endif

  for (size_t j = 0; j < files.size (); j++)
  {

#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
    vis.removeAllPointClouds();
    vis.removeAllShapes();
#endif

    if ((j % 50) == 0)
    {
      std::cout << "Loading image..." << j << std::endl;
    }
    //1. Load clouds with and without labels
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::PointCloud<pcl::PointXYZ>::Ptr loaded_cloud (new pcl::PointCloud<pcl::PointXYZ>);

    pcl::io::loadPCDFile (files[j], *loaded_cloud);

    pcl::PointCloud<pcl::PointXYZL>::Ptr cloud_labels (new pcl::PointCloud<pcl::PointXYZL>);
    pcl::PointCloud<pcl::PointXYZL>::Ptr loaded_cloud_labels (new pcl::PointCloud<pcl::PointXYZL>);
    pcl::io::loadPCDFile (files[j], *loaded_cloud_labels);

    //crop images to remove as many NaNs as possible and reduce the memory footprint
    {
      size_t min_col, min_row;
      size_t max_col, max_row;
      min_col = min_row = std::numeric_limits<size_t>::max ();
      max_col = max_row = 0;

      for (size_t col = 0; col < loaded_cloud->width; col++)
      {
        for (size_t row = 0; row < loaded_cloud->height; row++)
        {
          if (pcl::isFinite (loaded_cloud->at (col, row)))
          {
            if (row < min_row)
              min_row = row;

            if (row > max_row)
              max_row = row;

            if (col < min_col)
              min_col = col;

            if (col > max_col)
              max_col = col;
          }
        }
      }

      //std::cout << min_col << " - " << max_col << std::endl;
      //std::cout << min_row << " - " << max_row << std::endl;

      cropCloud<pcl::PointXYZ> (min_col, max_col, min_row, max_row, *loaded_cloud, *cloud);
      cropCloud<pcl::PointXYZL> (min_col, max_col, min_row, max_row, *loaded_cloud_labels, *cloud_labels);

      /*pcl::visualization::PCLVisualizer vis ("training");
       vis.addPointCloud(loaded_cloud);
       vis.spin();*/
    }

    //Compute integral image over depth
    boost::shared_ptr < pcl::IntegralImage2D<float, 1> > integral_image_depth;
    integral_image_depth.reset (new pcl::IntegralImage2D<float, 1> (false));

    int element_stride = sizeof(pcl::PointXYZ) / sizeof(float);
    int row_stride = element_stride * cloud->width;
    const float *data = reinterpret_cast<const float*> (&cloud->points[0]);
    integral_image_depth->setInput (data + 2, cloud->width, cloud->height, element_stride, row_stride);

    //Compute normals and normal integral images
    pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);

    if (USE_NORMALS_)
    {
      typedef typename pcl::IntegralImageNormalEstimation<pcl::PointXYZ, pcl::Normal> NormalEstimator_;
      NormalEstimator_ n3d;
      n3d.setNormalEstimationMethod (n3d.COVARIANCE_MATRIX);
      n3d.setInputCloud (cloud);
      n3d.setRadiusSearch (0.02);
      n3d.setKSearch (0);
      {
        pcl::ScopeTime t ("compute normals...");
        n3d.compute (*normals);
      }
    }

    int element_stride_normal = sizeof(pcl::Normal) / sizeof(float);
    int row_stride_normal = element_stride_normal * normals->width;
    boost::shared_ptr < pcl::IntegralImage2D<float, 1> > integral_image_normal_x;
    boost::shared_ptr < pcl::IntegralImage2D<float, 1> > integral_image_normal_y;
    boost::shared_ptr < pcl::IntegralImage2D<float, 1> > integral_image_normal_z;

    if (USE_NORMALS_)
    {
      integral_image_normal_x.reset (new pcl::IntegralImage2D<float, 1> (false));
      const float *data_nx = reinterpret_cast<const float*> (&normals->points[0]);
      integral_image_normal_x->setInput (data_nx, normals->width, normals->height, element_stride_normal, row_stride_normal);

      integral_image_normal_y.reset (new pcl::IntegralImage2D<float, 1> (false));
      const float *data_ny = reinterpret_cast<const float*> (&normals->points[0]);
      integral_image_normal_y->setInput (data_ny + 1, normals->width, normals->height, element_stride_normal, row_stride_normal);

      integral_image_normal_z.reset (new pcl::IntegralImage2D<float, 1> (false));
      const float *data_nz = reinterpret_cast<const float*> (&normals->points[0]);
      integral_image_normal_z->setInput (data_nz + 2, normals->width, normals->height, element_stride_normal, row_stride_normal);
    }

    //Using cloud labels estimate a 2D window from where to extract positive samples
    //Rest can be used to extract negative samples
    size_t min_col, min_row;
    size_t max_col, max_row;
    min_col = min_row = std::numeric_limits<size_t>::max ();
    max_col = max_row = 0;

    //std::cout << cloud_labels->width << " " << cloud_labels->height << std::endl;

    for (size_t col = 0; col < cloud_labels->width; col++)
    {
      for (size_t row = 0; row < cloud_labels->height; row++)
      {
        if (cloud_labels->at (col, row).label == 1)
        {
          if (row < min_row)
            min_row = row;

          if (row > max_row)
            max_row = row;

          if (col < min_col)
            min_col = col;

          if (col > max_col)
            max_col = col;
        }
      }
    }

#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
    pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_intensity(new pcl::PointCloud<pcl::PointXYZI>);
    cloud_intensity->width = cloud->width;
    cloud_intensity->height = cloud->height;
    cloud_intensity->points.resize(cloud->points.size());
    cloud_intensity->is_dense = cloud->is_dense;

    for (int jjj = 0; jjj < static_cast<int>(cloud->points.size()); jjj++)
    {
      cloud_intensity->points[jjj].getVector4fMap() = cloud->points[jjj].getVector4fMap();
      cloud_intensity->points[jjj].intensity = 0.f;
      int row, col;
      col = jjj % cloud->width;
      row = jjj / cloud->width;
      //std::cout << row << " " << col << std::endl;
      if (check_inside(col, row, min_col, max_col, min_row, max_row))
      {
        cloud_intensity->points[jjj].intensity = 1.f;
      }
    }

    pcl::visualization::PointCloudColorHandlerGenericField < pcl::PointXYZI > handler(cloud_intensity, "intensity");
    vis.addPointCloud(cloud_intensity, handler, "intensity_cloud");
#endif

    std::string pose_file (files[j]);
    boost::replace_all (pose_file, ".pcd", "_pose.txt");

    Eigen::Matrix4f pose_mat;
    pose_mat.setIdentity (4, 4);
    readMatrixFromFile (pose_file, pose_mat);

    Eigen::Vector3f ea = pose_mat.block<3, 3> (0, 0).eulerAngles (0, 1, 2);
    Eigen::Vector3f trans_vector = Eigen::Vector3f (pose_mat (0, 3), pose_mat (1, 3), pose_mat (2, 3));

    pcl::PointXYZ center_point;
    center_point.x = trans_vector[0];
    center_point.y = trans_vector[1];
    center_point.z = trans_vector[2];

    int N_patches = patches_per_image_;
    int pos_extracted = 0;
    int neg_extracted = 0;
    int w_size_2 = static_cast<int> (w_size_ / 2);

    //************************************************
    //2nd training style, Fanelli's journal description
    //************************************************
    {

      typedef std::pair<int, int> pixelpair;
      std::vector < pixelpair > negative_p, positive_p;
      //get negative and positive indices to sample from
      for (int col = 0; col < (static_cast<int> (cloud_labels->width) - w_size_); col++)
      {
        for (int row = 0; row < (static_cast<int> (cloud_labels->height) - w_size_); row++)
        {
          if (!pcl::isFinite (cloud->at (col + w_size_2, row + w_size_2)))
            continue;

          //reject patches with more than percent invalid values
          float percent = 0.5f;
          if (static_cast<float>(integral_image_depth->getFiniteElementsCount (col, row, w_size_, w_size_)) < (percent * static_cast<float>(w_size_ * w_size_)))
            continue;

          pixelpair pp = std::make_pair (col, row);
          if (cloud_labels->at (col + w_size_2, row + w_size_2).label == 1)
            positive_p.push_back (pp);
          else
            negative_p.push_back (pp);
        }
      }

      //shuffle and keep at most N_patches of each; do not grow the vectors
      //with default-constructed (0,0) pairs when fewer patches are available
      std::random_shuffle (positive_p.begin (), positive_p.end ());
      std::random_shuffle (negative_p.begin (), negative_p.end ());
      positive_p.resize (std::min (positive_p.size (), static_cast<size_t> (N_patches)));
      negative_p.resize (std::min (negative_p.size (), static_cast<size_t> (N_patches)));

      //extract positive patch
      for (size_t p = 0; p < positive_p.size (); p++)
      {
        int col, row;
        col = positive_p[p].first;
        row = positive_p[p].second;

        pcl::PointXYZ patch_center_point;
        patch_center_point.x = cloud->at (col + w_size_2, row + w_size_2).x;
        patch_center_point.y = cloud->at (col + w_size_2, row + w_size_2).y;
        patch_center_point.z = cloud->at (col + w_size_2, row + w_size_2).z;

        TrainingExample te;
        te.iimages_.push_back (integral_image_depth);
        if (USE_NORMALS_)
        {
          te.iimages_.push_back (integral_image_normal_x);
          te.iimages_.push_back (integral_image_normal_y);
          te.iimages_.push_back (integral_image_normal_z);
        }

        te.row_ = row;
        te.col_ = col;
        te.wsize_ = w_size_;

        te.trans_ = center_point.getVector3fMap () - patch_center_point.getVector3fMap ();
        te.trans_ *= 1000.f; //transform it to millimeters
        te.rot_ = ea;
        te.rot_ *= 57.2957795f; //transform it to degrees

        labels.push_back (1);
        pos_extracted++;
        total_pos++;

        training_examples.push_back (te);
#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
        pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_intensity2(new pcl::PointCloud<pcl::PointXYZI>(*cloud_intensity));
        for (int jjj = col; jjj < (col + w_size_); jjj++)
        {
          for (int iii = row; iii < (row + w_size_); iii++)
          {
            cloud_intensity2->at(jjj, iii).intensity = 2.f;
          }
        }

        vis.removeAllPointClouds();

        pcl::visualization::PointCloudColorHandlerGenericField < pcl::PointXYZI > handler(cloud_intensity2, "intensity");
        vis.addPointCloud(cloud_intensity2, handler, "cloud");
        vis.spinOnce();
        vis.spin();
        sleep(1);
#endif

      }

#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
      std::cout << "Going to extract negative patches..." << std::endl;
      sleep(2);
#endif

      for (size_t p = 0; p < negative_p.size (); p++)
      {
        int col, row;
        col = negative_p[p].first;
        row = negative_p[p].second;

        TrainingExample te;
        te.iimages_.push_back (integral_image_depth);
        if (USE_NORMALS_)
        {
          te.iimages_.push_back (integral_image_normal_x);
          te.iimages_.push_back (integral_image_normal_y);
          te.iimages_.push_back (integral_image_normal_z);
        }

        te.row_ = row;
        te.col_ = col;
        te.wsize_ = w_size_;
        labels.push_back (0);
        neg_extracted++;
        total_neg++;

        training_examples.push_back (te);
#if PCL_FACE_DETECTION_VIS_TRAINING_FDDP == 1
        pcl::PointCloud<pcl::PointXYZI>::Ptr cloud_intensity2(new pcl::PointCloud<pcl::PointXYZI>(*cloud_intensity));
        for (int jjj = col; jjj < (col + w_size_); jjj++)
        {
          for (int iii = row; iii < (row + w_size_); iii++)
          {
            cloud_intensity2->at(jjj, iii).intensity = 2.f;
          }
        }

        vis.removeAllPointClouds();

        pcl::visualization::PointCloudColorHandlerGenericField < pcl::PointXYZI > handler(cloud_intensity2, "intensity");
        vis.addPointCloud(cloud_intensity2, handler, "cloud");
        vis.spinOnce();
        vis.spin();
        sleep(1);
#endif
      }

      if (neg_extracted != N_patches)
      {
        std::cout << "Extracted " << neg_extracted << " negative patches" << std::endl;
        std::cout << files[j] << std::endl;
      }

      if (pos_extracted != N_patches)
      {
        std::cout << "Extracted " << pos_extracted << " positive patches" << std::endl;
        std::cout << files[j] << std::endl;
      }
    }
  }

  std::cout << training_examples.size () << " " << labels.size () << " " << total_neg << " " << total_pos << std::endl;

  //train random forest and make persistent
  std::vector<int> example_indices;
  for (size_t i = 0; i < labels.size (); i++)
    example_indices.push_back (static_cast<int> (i));

  label_data = labels;
  data_set = training_examples;
  examples = example_indices;
}
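The six-argument check_inside(col, row, min_col, max_col, min_row, max_row) used in the visualization block of Example #5 is a different, window-membership variant. A minimal sketch, assuming both bounds are inclusive (an assumption; the actual helper is not shown in this listing):

#include <cstddef>

// Hypothetical sketch: true when (col, row) falls inside the window
// [min_col, max_col] x [min_row, max_row], bounds inclusive.
inline bool check_inside(std::size_t col, std::size_t row,
                         std::size_t min_col, std::size_t max_col,
                         std::size_t min_row, std::size_t max_row)
{
  return col >= min_col && col <= max_col &&
         row >= min_row && row <= max_row;
}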