Example No. 1
This example builds a 3-layer perceptron by hand, fits it to a noisy sinc function by gradient descent, and evaluates the result with the empirical risk and a 6-fold cross-validation.
int main(int argc, char * argv[])
{
  srand(time(NULL));

  // Let us define a 3-layer perceptron architecture:
  // two sigmoid hidden layers and a linear output layer
  auto input = gaml::mlp::input<X>(INPUT_DIM, fillInput);
  auto l1 = gaml::mlp::layer(input, HIDDEN_LAYER_SIZE, gaml::mlp::mlp_sigmoid(), gaml::mlp::mlp_dsigmoid());
  auto l2 = gaml::mlp::layer(l1, HIDDEN_LAYER_SIZE, gaml::mlp::mlp_sigmoid(), gaml::mlp::mlp_dsigmoid());
  auto output = gaml::mlp::layer(l2, OUTPUT_DIM, gaml::mlp::mlp_identity(), gaml::mlp::mlp_didentity());
  auto mlp = gaml::mlp::perceptron(output, output_of);

  // Create a training basis
  // Let us try to fit a noisy sinc function
  Basis basis;
  basis.resize(NB_SAMPLES);
  for(auto& d: basis)
    {
      d.first = {{ -10.0 + 20.0 * gaml::random::uniform(0.0, 1.0) }};
      d.second = noisy_oracle(d.first);
    }

  // Set up the parameters for learning the MLP with a gradient descent
  gaml::mlp::learner::gradient::parameter gradient_params;
  gradient_params.alpha = 1e-2;  // gradient step size (learning rate)
  gradient_params.dalpha = 1e-3;

  gradient_params.verbose = true;
  // The stopping criteria: stop after max_iter iterations, or earlier
  // once the parameter updates become smaller than min_dparams
  gradient_params.max_iter = 10000;
  gradient_params.min_dparams = 1e-7;

  // Create the learner
  auto learning_algorithm = gaml::mlp::learner::gradient::algorithm(mlp, gradient_params, gaml::mlp::loss::Quadratic(), fillOutput);

  // Call the learner on the basis and get the learned predictor
  auto predictor = learning_algorithm(basis.begin(),
				      basis.end(),
				      input_of_data,
				      output_of_data);

  // Print out the structure of the perceptron we learned
  std::cout << predictor << std::endl;

  // Dump the results
  std::ofstream outfile("example-005-samples.data");
  for(auto& b: basis)
    outfile << b.first[0] << " "
            << b.second[0] << std::endl;
  outfile.close();

  outfile.open("example-005-regression.data");
  X x;
  for(x[0] = -10; x[0] < 10; x[0] += 0.1)
    {
      auto output = predictor(x);
      outfile << x[0]         << " "
	      << oracle(x)[0] << " "
	      << output[0]    << std::endl;
    }
  outfile.close();

  std::cout << "You can plot the results using gnuplot :" << std::endl;
  std::cout << "gnuplot " << ML_MLP_SHAREDIR << "/plot-example-005.gplot" << std::endl;
  std::cout << "This will produce example-005.ps" << std::endl;


  // Let us compute the empirical risk.
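  // (i.e. the quadratic loss averaged over the training basis)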
  auto evaluator = gaml::risk::empirical(gaml::mlp::loss::Quadratic());
  double risk = evaluator(predictor,
			  basis.begin(),
			  basis.end(),
			  input_of_data,
			  output_of_data);
  std::cout << "Empirical risk = " << risk << std::endl;

  // We will use a 6-fold cross-validation to estimate the real risk.
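  // Each fold serves once as a test set for a predictor trained on the
  // other five folds, and the test losses are averaged.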
  auto kfold_evaluator = gaml::risk::cross_validation(gaml::mlp::loss::Quadratic(),
						      gaml::partition::kfold(6),
						      true);

  double kfold_risk = kfold_evaluator(learning_algorithm,
				      basis.begin(),basis.end(),
				      input_of_data,output_of_data);

  std::cout << "Estimation of the real risk (6-fold): "
	    << kfold_risk << std::endl;


  return 0;
}
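
The listing relies on several definitions that live elsewhere in the example file: the dimension constants, the types X and Basis, the oracles, the accessors, and the marshalling callbacks handed to gaml::mlp. Below is a minimal sketch of what those definitions could look like. The concrete constant values, the iterator-based callback signatures, and the assumption that gaml::mlp::values_type behaves like a std::vector<double> buffer are guesses inferred from how the names are used above, not code from the example.

#include <array>
#include <vector>
#include <utility>
#include <cmath>

// Placeholder values; only INPUT_DIM and OUTPUT_DIM (= 1) are implied
// by the scalar indexing in the listing.
#define INPUT_DIM          1
#define HIDDEN_LAYER_SIZE 10
#define OUTPUT_DIM         1
#define NB_SAMPLES       100

using X     = std::array<double, INPUT_DIM>;   // sample input
using Y     = std::array<double, OUTPUT_DIM>;  // sample output
using Data  = std::pair<X, Y>;
using Basis = std::vector<Data>;

// Accessors handed to the learner and to the risk evaluators.
X input_of_data (const Data& d) { return d.first;  }
Y output_of_data(const Data& d) { return d.second; }

// The regression target is a sinc; the training samples add uniform noise.
Y oracle(const X& x)       { return {{ std::sin(x[0]) / x[0] }}; }
Y noisy_oracle(const X& x) {
  auto y = oracle(x);
  y[0] += gaml::random::uniform(-0.1, 0.1);
  return y;
}

// Marshalling callbacks between the array types and the MLP value buffer
// (assumed signatures; gaml::mlp::values_type is taken to be a
// std::vector<double>-like container).
void fillInput(gaml::mlp::values_type::iterator iter, const X& x) {
  for(auto xi : x) *(iter++) = xi;
}
void fillOutput(gaml::mlp::values_type::iterator iter, const Y& y) {
  for(auto yi : y) *(iter++) = yi;
}
Y output_of(gaml::mlp::values_type::const_iterator iter) {
  Y y;
  for(auto& yi : y) yi = *(iter++);
  return y;
}
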
Example No. 2
This example tackles the same noisy sinc regression, but lets a MetaLearner find the optimal perceptron before training it; the resulting predictor is evaluated the same way.
int main(int argc, char * argv[])
{
  srand(time(NULL));

  // Create a training basis
  // Let us try to fit a noisy sinc function
  Basis basis;
  basis.resize(NB_SAMPLES);
  for(auto& d: basis)
    {
      d.first = {{ -10.0 + 20.0 * gaml::random::uniform(0.0, 1.0) }};
      d.second = {{ sin(d.first[0])/d.first[0] + gaml::random::uniform(-0.1, 0.1) }};
    }
  
  // Create the learner (true enables verbose output)
  MetaLearner learning_algorithm(true);
  
  std::cout << "Finding the optimal perceptron for the basis and train it..." << std::endl;

  // Call the learner on the basis and get the learned predictor
  auto predictor = learning_algorithm(basis.begin(),
				      basis.end(),
				      input_of_data,
				      output_of_data);

  std::cout << "done!" << std::endl;

  // Print out the structure of the perceptron
  std::cout << predictor << std::endl;

  // Dump the results
  std::ofstream outfile("example-003.data");
  for(auto& b: basis)
    {
      auto output = predictor(b.first);
      outfile << b.first[0] << " "
              << b.second[0] << " "
              << output[0] << std::endl;
    }
  outfile.close();
  std::cout << "You can plot the results using gnuplot :" << std::endl;
  std::cout << "gnuplot " << ML_MLP_SHAREDIR << "/plot-example-003.gplot" << std::endl;

  // Let us compute the empirical risk.
  auto evaluator = gaml::risk::empirical(gaml::mlp::loss::Quadratic());
  double risk = evaluator(predictor,
			  basis.begin(),
			  basis.end(),
			  input_of_data,
			  output_of_data);
  std::cout << "Empirical risk = " << risk << std::endl;

  // We will use a 6-fold cross-validation to estimate the real risk.
  auto kfold_evaluator = gaml::risk::cross_validation(gaml::mlp::loss::Quadratic(),
						      gaml::partition::kfold(6),
						      true);

  // Silence the learner during the cross-validation runs
  learning_algorithm.verbosity = false;

  double kfold_risk = kfold_evaluator(learning_algorithm,
				      basis.begin(),basis.end(),
				      input_of_data,output_of_data);

  std::cout << "Estimation of the real risk (6-fold): "
	    << kfold_risk << std::endl;

  return 0;
}
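
The MetaLearner type used above is not shown. In gaml, a learner is simply a function object: operator() takes a data range plus the two accessors and returns a trained predictor, which is why the same object can be handed both the basis and the cross-validation evaluator. The self-contained toy below sketches that shape with a model-selection loop; everything in it (the affine candidates, the selection by training-set empirical risk) is hypothetical, whereas the real MetaLearner searches over perceptron architectures.

#include <cstddef>
#include <iostream>
#include <limits>
#include <vector>

struct Affine {                        // toy predictor: y = a*x + b
  double a, b;
  double operator()(double x) const { return a * x + b; }
};

struct AffineLearner {                 // fits the intercept, slope fixed
  double slope;
  template<typename DataIt, typename InputOf, typename OutputOf>
  Affine operator()(DataIt begin, DataIt end,
                    const InputOf& in, const OutputOf& out) const {
    double sum = 0;
    std::size_t n = 0;
    for(auto it = begin; it != end; ++it, ++n)
      sum += out(*it) - slope * in(*it);
    return { slope, n ? sum / n : 0.0 };
  }
};

struct MetaLearner {
  bool verbosity;
  std::vector<AffineLearner> candidates = {{-1.0}, {0.0}, {1.0}};

  explicit MetaLearner(bool verbose) : verbosity(verbose) {}

  // Same calling convention as the gaml learners of Example No. 1:
  // train every candidate, score it, and keep the best predictor.
  template<typename DataIt, typename InputOf, typename OutputOf>
  Affine operator()(DataIt begin, DataIt end,
                    const InputOf& in, const OutputOf& out) const {
    Affine best {0.0, 0.0};
    double best_risk = std::numeric_limits<double>::infinity();
    for(const auto& learner : candidates) {
      auto p = learner(begin, end, in, out);
      double risk = 0;
      std::size_t n = 0;
      for(auto it = begin; it != end; ++it, ++n) {
        double e = p(in(*it)) - out(*it);
        risk += e * e;                 // quadratic loss
      }
      if(n) risk /= n;                 // empirical risk
      if(verbosity)
        std::cout << "slope " << learner.slope
                  << " -> empirical risk " << risk << std::endl;
      if(risk < best_risk) { best_risk = risk; best = p; }
    }
    return best;
  }
};

Because operator() is templated on the iterator and accessor types, an object like this plugs into gaml::risk::cross_validation unchanged. A production meta learner would typically score candidates on held-out data rather than on the training range itself.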