Example #1
Builds a network graph from a dataset configuration file and a net configuration file and prints the assembled graph in Graphviz DOT format.
int main (int argc, char* argv[]) {
    if (argc < 3) {
        LOGERROR << "USAGE: " << argv[0] << " <dataset config file> <net config file>";
        LOGEND;
        return -1;
    }

    std::string net_config_fname (argv[2]);
    std::string dataset_config_fname (argv[1]);

    std::ostringstream ss;

    Conv::System::Init();

    // Open network and dataset configuration files
    std::ifstream net_config_file (net_config_fname, std::ios::in);
    std::ifstream dataset_config_file (dataset_config_fname, std::ios::in);

    if (!net_config_file.good()) {
        FATAL ("Cannot open net configuration file!");
    }

    net_config_fname = net_config_fname.substr (net_config_fname.rfind ("/") + 1);

    if (!dataset_config_file.good()) {
        FATAL ("Cannot open dataset configuration file!");
    }

    dataset_config_fname = dataset_config_fname.substr (dataset_config_fname.rfind ("/") + 1);

    // Parse network configuration file
    Conv::ConfigurableFactory* factory = new Conv::ConfigurableFactory (net_config_file, 8347734, true);
    factory->InitOptimalSettings();

    // Extract important settings from parsed configuration
    Conv::TrainerSettings settings = factory->optimal_settings();
    settings.pbatchsize = 1;
    unsigned int BATCHSIZE = settings.pbatchsize;
    LOGDEBUG << "Optimal settings: " << settings;

    // Load dataset
    Conv::Dataset* dataset = nullptr;
    if (factory->method() == Conv::PATCH) {
        dataset = Conv::TensorStreamPatchDataset::CreateFromConfiguration(dataset_config_file, false, Conv::LOAD_BOTH, factory->patchsizex(), factory->patchsizey());
    }
    else if (factory->method() == Conv::FCN) {
        dataset = Conv::TensorStreamDataset::CreateFromConfiguration(dataset_config_file, false, Conv::LOAD_BOTH);
    }

    if (dataset == nullptr) {
        FATAL ("Unsupported training method in net configuration!");
    }

    unsigned int CLASSES = dataset->GetClasses();

    // Assemble net
    Conv::Net net;
    int data_layer_id = 0;

    Conv::DatasetInputLayer* data_layer = nullptr;

    data_layer = new Conv::DatasetInputLayer (*dataset, BATCHSIZE, 1.0, 983923);
    data_layer_id = net.AddLayer (data_layer);

    int output_layer_id =
        factory->AddLayers (net, Conv::Connection (data_layer_id), CLASSES, true, ss);

    LOGDEBUG << "Output layer id: " << output_layer_id;

    Conv::NetGraphNode* data_node = new Conv::NetGraphNode(data_layer);
    data_node->is_input = true;

    Conv::NetGraph graph;
    graph.AddNode(data_node);
    bool completeness = factory->AddLayers(graph, Conv::NetGraphConnection(data_node, 0), CLASSES, true);

    graph.Initialize();

    LOGINFO << "Complete: " << completeness;

    if(!completeness)
        FATAL("Graph completeness test failed!");

    LOGINFO << "DONE!";
    LOGEND;
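
    // Emit the assembled graph in Graphviz DOT format; redirecting stdout to a
    // file yields a .dot file that Graphviz (e.g. "dot -Tpng") can render.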

    std::cout << "\ndigraph G {\n";
    graph.PrintGraph(std::cout);
    std::cout << "}\n";
    //std::cout << "\nGraph output:\ndigraph G {\n" << ss.str() << "\n}\n";
    return 0;
}
Example #2
Computes per-class pixel statistics over a dataset's training samples and derives class-balancing correction ratios, with and without the per-pixel weights.
int main(int argc, char* argv[]) {
  if (argc < 2) {
    LOGERROR << "USAGE: " << argv[0] << " <dataset configuration file>";
    LOGEND;
    return -1;
  }

  Conv::System::Init();


  // Open tensor stream
  std::string dataset_config_file(argv[1]);
  
  std::ifstream dataset_config_fstream(dataset_config_file, std::ios::in);
  if(!dataset_config_fstream.good()) {
    FATAL("Cannot open " << dataset_config_file << "!");
  }
  
  Conv::Dataset* dataset = Conv::TensorStreamDataset::CreateFromConfiguration(dataset_config_fstream);

  Conv::Tensor data_tensor(1, dataset->GetWidth(), dataset->GetHeight(), dataset->GetInputMaps());
  Conv::Tensor weight_tensor(1, dataset->GetWidth(), dataset->GetHeight(), 1);
  Conv::Tensor label_tensor(1, dataset->GetWidth(), dataset->GetHeight(), dataset->GetLabelMaps());
  Conv::Tensor helper_tensor(1, dataset->GetWidth(), dataset->GetHeight(), 2);
  
  std::vector<Conv::datum> class_weights = dataset->GetClassWeights();
  std::vector<std::string> class_names = dataset->GetClassNames();
  
  long double* pixel_counts = new long double[dataset->GetClasses()];
  long double* pixel_counts_weighted = new long double[dataset->GetClasses()];
  for(unsigned int clazz = 0; clazz < dataset->GetClasses(); clazz++) {
    pixel_counts[clazz] = 0;
    pixel_counts_weighted[clazz] = 0;
  }
  
  for(unsigned int sample = 0; sample < dataset->GetTrainingSamples(); sample++) {
    LOGINFO << "Processing sample " << sample+1 << "/" << dataset->GetTrainingSamples() << std::flush;
    dataset->GetTrainingSample(data_tensor, label_tensor, helper_tensor, weight_tensor, 0, sample);
    for(unsigned int y = 0; y < dataset->GetHeight(); y++) {
      for(unsigned int x = 0; x < dataset->GetWidth(); x++) {
        unsigned int pixel_class = label_tensor.PixelMaximum(x,y,0);
        Conv::datum weight = *weight_tensor.data_ptr_const(x,y);
        
        pixel_counts[pixel_class] += (long double)weight;
        pixel_counts_weighted[pixel_class] += (long double)(weight * class_weights[pixel_class]);
      }
    }
  }
  
  long double total_pixels = 0;
  long double total_pixels_weighted = 0;
  long double total_classes = 0;
  long double total_classes_weighted = 0;
  for(unsigned int clazz = 0; clazz < dataset->GetClasses(); clazz++) {
    total_pixels += pixel_counts[clazz];
    total_pixels_weighted += pixel_counts_weighted[clazz];
    if(pixel_counts[clazz] > 0)
      total_classes++;
    if(pixel_counts_weighted[clazz] > 0)
      total_classes_weighted++;
  }
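  // Each occurring class is expected to cover a uniform 1/total_classes share
  // of the pixels; the correction ratio below is expected_ratio / actual_ratio,
  // i.e. the factor that would rebalance a class to that uniform share.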
  long double expected_ratio = 1.0 / total_classes;
  long double expected_ratio_weighted = 1.0 / total_classes_weighted;
  long double correction_ratio_sum = 0;
  long double correction_ratio_sum_weighted = 0;
  for(unsigned int clazz = 0; clazz < dataset->GetClasses(); clazz++) {
    if(pixel_counts[clazz] > 0)
      correction_ratio_sum += expected_ratio/(pixel_counts[clazz]/total_pixels);
    if(pixel_counts_weighted[clazz] > 0)
      correction_ratio_sum_weighted += expected_ratio_weighted/(pixel_counts_weighted[clazz]/total_pixels_weighted);
  }
  
  // Ignoring weights
  LOGINFO << "Stats when ignoring weights";
  LOGINFO << "===========================";
  LOGINFO << "Classes counted: " << total_classes;
  LOGINFO << "Expected ratio: " << 100.0 * expected_ratio << "%";
  for(unsigned int clazz = 0; clazz < dataset->GetClasses(); clazz++) {
    long double actual_ratio = pixel_counts[clazz]/total_pixels;
    long double correction_ratio = 0;
    if(pixel_counts[clazz] > 0) {
      correction_ratio = expected_ratio / actual_ratio;
    }
    LOGINFO << "Class " << std::setw(30) << class_names[clazz] << " | " << std::setw(14) << static_cast<long>(pixel_counts[clazz]) << std::setw(14) << 100.0 * actual_ratio << "%" << std::setw(14) << correction_ratio << std::setw(14) << static_cast<long>(correction_ratio * pixel_counts[clazz]);
  }
  
  // Not ignoring weights
  LOGINFO << "Stats when not ignoring weights";
  LOGINFO << "===========================";
  LOGINFO << "Classes counted: " << total_classes_weighted;
  LOGINFO << "Expected ratio: " << 100.0 * expected_ratio_weighted << "%";
  for(unsigned int clazz = 0; clazz < dataset->GetClasses(); clazz++) {
    long double actual_ratio = pixel_counts_weighted[clazz]/total_pixels_weighted;
    long double correction_ratio = 0;
    if(pixel_counts_weighted[clazz] > 0) {
      correction_ratio = expected_ratio_weighted / actual_ratio;
    }
    LOGINFO << "Class " << std::setw(30) << class_names[clazz] << " | " << std::setw(14) << static_cast<long>(pixel_counts_weighted[clazz]) << std::setw(14) << 100.0 * actual_ratio << "%" << std::setw(14) << correction_ratio << std::setw(14) << static_cast<long>(correction_ratio * pixel_counts_weighted[clazz]);
  }
  

  LOGINFO << "DONE!";
  LOGEND;
  
  return 0;
}
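For reference, the class-balancing arithmetic used in Example #2 can be reproduced in isolation. The following is a minimal standalone sketch that depends only on the standard library; the per-class pixel counts are made-up values for illustration, not taken from any dataset.

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical per-class pixel counts (illustrative values only).
  std::vector<long double> pixel_counts = {900000.0L, 80000.0L, 20000.0L, 0.0L};

  long double total_pixels = 0;
  long double total_classes = 0;
  for (long double count : pixel_counts) {
    total_pixels += count;
    if (count > 0)
      total_classes++;
  }

  // Uniform share expected per occurring class.
  long double expected_ratio = 1.0L / total_classes;

  for (std::size_t clazz = 0; clazz < pixel_counts.size(); clazz++) {
    if (pixel_counts[clazz] <= 0)
      continue;
    long double actual_ratio = pixel_counts[clazz] / total_pixels;
    // Same formula as in Example #2: the factor that rebalances this class.
    long double correction_ratio = expected_ratio / actual_ratio;
    std::cout << "Class " << clazz << ": correction ratio "
              << static_cast<double>(correction_ratio) << "\n";
  }
  return 0;
}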
Example #3
Applies a trained network to a single input image and writes the colorized segmentation result to an output image file.
int main (int argc, char* argv[]) {
  if (argc < 6) {
    LOGERROR << "USAGE: " << argv[0] << " <dataset config file> <net config file> <net parameter tensor> <input image file> <output image file>";
    LOGEND;
    return -1;
  }

  // Capture command line arguments
  std::string output_image_fname (argv[5]);
  std::string input_image_fname (argv[4]);
  std::string param_tensor_fname (argv[3]);
  std::string net_config_fname (argv[2]);
  std::string dataset_config_fname (argv[1]);
  
  // Initialize CN24
  Conv::System::Init();

  // Open network and dataset configuration files
  std::ifstream param_tensor_file(param_tensor_fname,std::ios::in | std::ios::binary);
  std::ifstream net_config_file(net_config_fname,std::ios::in);
  std::ifstream dataset_config_file(dataset_config_fname,std::ios::in);
  
  if(!param_tensor_file.good()) {
    FATAL("Cannot open param tensor file!");
  }
  if(!net_config_file.good()) {
    FATAL("Cannot open net configuration file!");
  }
  if(!dataset_config_file.good()) {
    FATAL("Cannot open dataset configuration file!");
  }
  
  // Parse network configuration file
  Conv::ConfigurableFactory* factory = new Conv::ConfigurableFactory(net_config_file, 238238, false);
  // Parse dataset configuration file
  Conv::TensorStreamDataset* dataset = Conv::TensorStreamDataset::CreateFromConfiguration(dataset_config_file, true);
  unsigned int CLASSES = dataset->GetClasses();
  
  // Load image
  Conv::Tensor original_data_tensor(input_image_fname);
  
  // Pad the image dimensions up to the next multiple of 8
  // (presumably so that the network's pooling/striding stages divide evenly)
  unsigned int width = original_data_tensor.width();
  unsigned int height = original_data_tensor.height();
  if(width & 1)
    width++;
  if(height & 1)
    height++;
  
  if(width & 2)
    width+=2;
  if(height & 2)
    height+=2;
  
  if(width & 4)
    width+=4;
  if(height & 4)
    height+=4;
  
  Conv::Tensor data_tensor(1, width, height, original_data_tensor.maps());
  data_tensor.Clear();
  Conv::Tensor::CopySample(original_data_tensor, 0, data_tensor, 0);

  // Assemble net
  Conv::NetGraph graph;
  Conv::InputLayer input_layer(data_tensor);

  Conv::NetGraphNode input_node(&input_layer);
  input_node.is_input = true;

  graph.AddNode(&input_node);
  bool complete = factory->AddLayers(graph, Conv::NetGraphConnection(&input_node), CLASSES);
  if (!complete)
    FATAL("Failed completeness check, inspect model!");

  graph.Initialize();


  // Load network parameters
  graph.DeserializeParameters(param_tensor_file);
  
  graph.SetIsTesting(true);
  LOGINFO << "Classifying..." << std::flush;
  graph.FeedForward();
  
  Conv::Tensor* net_output_tensor = &graph.GetDefaultOutputNode()->output_buffers[0].combined_tensor->data;
  Conv::Tensor image_output_tensor(1, net_output_tensor->width(), net_output_tensor->height(), 3);
  
  LOGINFO << "Colorizing..." << std::flush;
  dataset->Colorize(*net_output_tensor, image_output_tensor);
  
  // Recrop image down
  Conv::Tensor small(1, original_data_tensor.width(), original_data_tensor.height(), 3);
  for(unsigned int m = 0; m < 3; m++)
    for(unsigned int y = 0; y < small.height(); y++)
      for(unsigned int x = 0; x < small.width(); x++)
        *small.data_ptr(x,y,m,0) = *image_output_tensor.data_ptr_const(x,y,m,0);

  small.WriteToFile(output_image_fname);

  LOGINFO << "DONE!";
  LOGEND;
  return 0;
}
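The dimension handling in Example #3 rounds the input width and height up to the next multiple of 8 before building the padded tensor. The same rounding can be written as a small standalone helper; the sketch below is purely illustrative and the function name is hypothetical, not part of the CN24 API.

#include <cassert>

// Round a dimension up to the next multiple of 8; this is equivalent to the
// chain of bit tests used in Example #3.
unsigned int RoundUpToMultipleOfEight(unsigned int v) {
  return (v + 7u) & ~7u;
}

int main() {
  assert(RoundUpToMultipleOfEight(1) == 8);
  assert(RoundUpToMultipleOfEight(8) == 8);
  assert(RoundUpToMultipleOfEight(13) == 16);
  return 0;
}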