int parameter_server(int argc, char *argv[])
{
    // Reads a delimited training file, fits a multi-class logistic-regression
    // model with (mini-batch) gradient descent, and prints per-label F-scores
    // computed on the held-out validation split.
    //
    // argv layout used here: [1] training file path, [2] single-character
    // field delimiter, [6] number of passes over the data, [7] (optional)
    // mini-batch size. learning_rate and reg_param are globals defined
    // elsewhere in the file — presumably parsed from argv[3..5]; TODO confirm.

    // Guard the argv accesses below: the original indexed argv[6]
    // unconditionally, which reads out of bounds when fewer args are given.
    assert(argc >= 7);

    std::vector<std::vector<double> > training;   // one feature row per training example
    std::vector<std::vector<double> > validation; // one feature row per validation example
    // TODO: both can be optimized — count rows/feats first and allocate accordingly.

    std::unordered_set<double> label_set;                   // distinct class labels seen in the data
    std::unordered_map<double, std::vector<double> > model; // label -> weight vector

    char *training_file = argv[1];

    assert(strlen(argv[2]) == 1); // delimiter must be exactly one character
    char delimiter = argv[2][0];

    // read training data (also fills the validation split and the label set)
    std::cout << "Reading training data ... ";
    read_training_data(training_file, delimiter, training, validation, label_set);
    std::cout << "[Done]" << std::endl;

    int data_passes = std::stoi(std::string(argv[6]));
    size_t batch_size = training.size(); // default: full-batch gradient descent

    if (argc == 8)
        batch_size = std::stoul(std::string(argv[7]));

    std::cout << std::endl
        << "***Info***" << std::endl
        << "Training Data File: " << training_file << std::endl
        << "Learning Rate: " << learning_rate << std::endl
        << "Regularization Parameter: " << reg_param << std::endl
        << "Data Passes: " << data_passes << std::endl
        << "Batch Size: " << batch_size << std::endl
        << "Num Labels: " << label_set.size() << std::endl
        << "Training Set Size: " << training.size() << std::endl
        << "Validation Set Size: " << validation.size() << std::endl
        << "**********" << std::endl;

    // logistic regression (trains `model` in place)
    logistic_regression(training, label_set, model, learning_rate, reg_param, batch_size, data_passes);

    // print the f-score(s), one line per label
    const std::unordered_map<double, double> fsc = fscore(validation, label_set, model);
    std::cout << std::endl << std::endl;
    std::cout << "F-Score(s):" << std::endl;
    std::cout << "Label\tScore" << std::endl;
    for (const auto &entry : fsc) {
        std::cout << entry.first << "\t" << entry.second << std::endl;
    }

    return 0;
}
//--------------------------------------------------------------------
int main(int argc, char* argv[])
{
	// Driver for a steady-state evolutionary run: loads training graphs,
	// configures GP parameters, and evolves (optionally across MPI processes,
	// each with its own random seed).

	t_parameters params;
	params.num_sub_populations = 2;
	params.sub_population_size = 30;     // individuals per sub-population (must be an even number!)
	params.code_length = 50;
	params.num_generations = 1000;       // the number of generations
	params.mutation_probability = 0.01;  // mutation probability
	params.crossover_probability = 0.9;  // crossover probability

	params.variables_probability = 0.4;
	params.operators_probability = 0.5;
	// sum of variables_prob + operators_prob + constants_prob MUST BE 1,
	// so constants take the remainder
	params.constants_probability = 1 - params.variables_probability - params.operators_probability;

	// NOTE: fixed stale comment — the code uses 10 constants, not 3
	params.num_constants = 10; // use 10 constants from the -1 ... +1 interval
	params.constants_min = -1;
	params.constants_max = 1;

#ifdef USE_THREADS
	params.num_threads = 4;
#endif

	t_graph *graphs = NULL;
	int num_graphs = 0;

	if (!read_training_data(graphs, num_graphs)) {
		printf("Cannot find input file(s)! Please specify the full path!\n");
		printf("Press Enter ...");
		getchar();
		return 1;
	}

	// start_steady_state() below uses graphs[0..1] for training and
	// graphs[2..3] for validation; the original indexed graphs + 2 without
	// checking, reading out of bounds when fewer graphs were loaded.
	if (num_graphs < 4) {
		printf("Need at least 4 graphs (2 training + 2 validation), got %d!\n", num_graphs);
		delete_training_graphs(graphs, num_graphs);
		return 1;
	}

	compute_global_variables(graphs, num_graphs);

	int num_variables = 10;

	int current_proc_id = 0;
	int num_procs = 0;

#ifdef USE_MPI
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
	MPI_Comm_rank(MPI_COMM_WORLD, &current_proc_id);
#endif

	srand(current_proc_id); // we run each process with a different seed

	printf("Evolving... proc_id = %d\n", current_proc_id);
	// 2 training graphs starting at graphs, 2 validation graphs at graphs + 2
	start_steady_state(params, graphs, 2, graphs + 2, 2, num_variables, num_procs, current_proc_id);

	delete_training_graphs(graphs, num_graphs);

#ifdef USE_MPI
	// only rank 0 owns the terminal prompt
	if (current_proc_id == 0) {
		printf("Press Enter ...");
		getchar();
	}

	MPI_Finalize();
#endif

	return 0;
}