Example #1
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <random>
#include <thread>
// plus the daw_rfb_server header that declares daw::rfb::RFBServer (not shown in the original snippet)

int main( int argc, char **argv ) {

	daw::rfb::RFBServer server{640, 480, daw::rfb::BitDepth::eight};

	auto th = std::thread( [&]( ) {
		std::random_device rd;
		std::mt19937 gen{rd( )};
		std::uniform_int_distribution<uint16_t> dist_x{0, server.max_x( )};
		std::uniform_int_distribution<uint16_t> dist_y{0, server.max_y( )};
		// note: uint8_t is not a permitted result type for std::uniform_int_distribution,
		// so draw plain ints here and narrow to uint8_t at the call site
		std::uniform_int_distribution<int> dist_c{0, 255};

		while( true ) {
			auto x = dist_x( gen );
			auto y = dist_y( gen );
			uint16_t width = 20;
			uint16_t height = 20;
			if( x + width >= server.width( ) ) {
				width = ( server.width( ) - x ) - 1;
			}
			if( y + height >= server.height( ) ) {
				height = ( server.height( ) - y ) - 1;
			}
			draw_rectagle( server, x, y, x + width, y + height,
			               {static_cast<uint8_t>( dist_c( gen ) ), static_cast<uint8_t>( dist_c( gen ) ),
			                static_cast<uint8_t>( dist_c( gen ) ), 0} );

			std::this_thread::sleep_for( std::chrono::seconds( 2 ) );
		}
	} );
	server.listen( 1234, daw::nodepp::lib::net::ip_version::ipv4 );

	// join the drawing thread so its destructor does not call std::terminate if listen( ) ever returns
	th.join( );

	return EXIT_SUCCESS;
}
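
The bounds checks inside the loop keep each 20x20 rectangle inside the framebuffer. Below is a minimal, stand-alone sketch of that clamping factored into a helper; the ClampedRect type and clamp_rect function are illustrative only and are not part of daw_rfb_server.

#include <cstdint>

// Illustrative helper only (not part of daw_rfb_server): shrink a width x height
// rectangle anchored at (x, y) so it stays strictly inside a max_w x max_h
// framebuffer, mirroring the checks in the drawing thread above.
struct ClampedRect {
	uint16_t x;
	uint16_t y;
	uint16_t width;
	uint16_t height;
};

inline ClampedRect clamp_rect( uint16_t x, uint16_t y, uint16_t width, uint16_t height,
                               uint16_t max_w, uint16_t max_h ) {
	if( x + width >= max_w ) {
		width = static_cast<uint16_t>( ( max_w - x ) - 1 );
	}
	if( y + height >= max_h ) {
		height = static_cast<uint16_t>( ( max_h - y ) - 1 );
	}
	return ClampedRect{x, y, width, height};
}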
// -----------------------------------------------------------------------------
//
// Purpose and Method: Train (or resume training of) one facial-feature
//                     conditional regression tree of a head-pose forest by
//                     sampling random patches from annotated face images.
// Inputs:             Forest parameters, annotations grouped by head pose,
//                     the forest (head pose) index and the tree index.
// Outputs:            The regression tree for (idx_forest, idx_tree), either
//                     updated from an existing tree file or trained from scratch.
// Dependencies:       OpenCV (image loading, color conversion, resizing),
//                     Boost.Random and boost::progress_display.
// Restrictions and Caveats: Random seeds are derived from idx_tree, so the
//                     same tree index always sees the same annotations and patches.
//
// -----------------------------------------------------------------------------
void
trainTree
  (
  ForestParam mp_param,
  std::vector< std::vector<FaceAnnotation> > &ann,
  int idx_forest,
  int idx_tree
  )
{
  // Try to read the facial feature conditional regression tree
  char tree_path[200];
  sprintf(tree_path, "%s/forest_%d/tree_%03d.txt", mp_param.tree_path.c_str(), idx_forest, idx_tree);
  PRINT("Read facial feature regression tree: " << tree_path);
  Tree<MPSample> *tree;
  bool is_tree_load = Tree<MPSample>::load(&tree, tree_path);

  // Estimate the number of images available for each class
  int imgs_per_class = mp_param.nimages;
  for (unsigned int i=0; i < ann.size(); i++)
    imgs_per_class = std::min(imgs_per_class, static_cast<int>(ann[i].size()));
  PRINT("Number of images per class: " << imgs_per_class);

  // Random annotations for this head pose
  srand(idx_tree+1);
  std::vector<FaceAnnotation> annotations;
  std::random_shuffle(ann[idx_forest].begin(), ann[idx_forest].end());
  annotations.insert(annotations.end(), ann[idx_forest].begin(), ann[idx_forest].begin()+imgs_per_class);

  std::vector<MPSample*> mp_samples;
  mp_samples.reserve(annotations.size()*mp_param.npatches);
  PRINT("Total number of images: " << annotations.size());
  PRINT("Reserved patches: " << annotations.size()*mp_param.npatches);

  boost::mt19937 rng;
  rng.seed(idx_tree+1);
  boost::progress_display show_progress(annotations.size());
  for (unsigned int i=0; i < annotations.size(); i++, ++show_progress)
  {
    // Load image
    TRACE("Evaluate image: " << annotations[i].url);
    cv::Mat img = loadImage(mp_param.image_path, annotations[i].url);
    if (img.empty())
    {
      ERROR("Could not load: " << annotations[i].url);
      continue;
    }

    // Convert image to gray scale
    cv::Mat img_gray;
    cv::cvtColor(img, img_gray, cv::COLOR_BGR2GRAY);

    // Scale image and annotations
    float scale = static_cast<float>(mp_param.face_size)/static_cast<float>(annotations[i].bbox.width);
    cv::Mat img_scaled;
    cv::resize(img_gray, img_scaled, cv::Size(img_gray.cols*scale, img_gray.rows*scale), 0, 0);
    annotations[i].bbox.x *= scale;
    annotations[i].bbox.y *= scale;
    annotations[i].bbox.width *= scale;
    annotations[i].bbox.height *= scale;
    for (unsigned int j=0; j < annotations[i].parts.size(); j++)
      annotations[i].parts[j] *= scale;

    // Extract face image and enlarge to make sure that all facial features are enclosed
    cv::Rect enlarge_bbox;
    enlargeFace(img_scaled, enlarge_bbox, annotations[i]);
    cv::Mat img_roi = img_scaled(enlarge_bbox);

    // Extract patches from this image sample
    ImageSample *sample = new ImageSample(img_roi, mp_param.features, false);

    // Extract positive patches
    int patch_size = mp_param.getPatchSize();
    boost::uniform_int<> dist_x(1, img_roi.cols-patch_size-2);
    boost::uniform_int<> dist_y(1, img_roi.rows-patch_size-2);
    boost::variate_generator< boost::mt19937&, boost::uniform_int<> > rand_x(rng, dist_x);
    boost::variate_generator< boost::mt19937&, boost::uniform_int<> > rand_y(rng, dist_y);
    for (int j=0; j < mp_param.npatches; j++)
    {
      cv::Rect bbox = cv::Rect(rand_x(), rand_y(), patch_size, patch_size);
      MPSample *mps = new MPSample(sample, bbox, annotations[i].parts, mp_param.face_size, true);
      mp_samples.push_back(mps);
    }
  }
  PRINT("Used patches: " << mp_samples.size());

  if (is_tree_load && !tree->isFinished())
    tree->update(mp_samples, &rng);
  else
    tree = new Tree<MPSample>(mp_samples, mp_param, &rng, tree_path);
}
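
A hypothetical driver that trains a whole forest with trainTree could look like the sketch below. The ntrees field on ForestParam and the loadAnnotations helper are assumptions made for illustration; only trainTree itself comes from the code above, and the sketch relies on the same headers as that translation unit.

// Hypothetical driver sketch: train every tree of every head-pose forest in
// sequence. "mp_param.ntrees" and "loadAnnotations" are assumed for
// illustration and are not defined in the snippet above.
int main(int argc, char **argv)
{
  ForestParam mp_param;                            // assumed to be filled from a configuration file
  std::vector< std::vector<FaceAnnotation> > ann;  // one annotation vector per head pose
  loadAnnotations(mp_param, ann);                  // hypothetical helper that loads the ground truth

  for (unsigned int idx_forest=0; idx_forest < ann.size(); idx_forest++)
    for (int idx_tree=0; idx_tree < mp_param.ntrees; idx_tree++)
      trainTree(mp_param, ann, idx_forest, idx_tree);

  return EXIT_SUCCESS;
}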