/// Simulated-annealing search over the number of hidden perceptrons (the
/// "order") of the multilayer perceptron's last hidden layer.
///
/// A random starting order is drawn uniformly in
/// [minimum_order, maximum_order]; each iteration proposes a random
/// neighbour inside a window of width (maximum_order - minimum_order)/3
/// around the current optimum and accepts it with the Boltzmann probability
/// min(1, exp(-(candidate_selection - best_selection)/temperature)).
/// The temperature starts at the initial selection performance and is
/// multiplied by cooling_rate after every iteration (geometric cooling).
///
/// Stopping criteria: minimum_temperature, maximum_time,
/// selection_performance_goal and maximum_iterations_number.
///
/// @return Newly heap-allocated results structure; the caller owns it.
SimulatedAnnealingOrder::SimulatedAnnealingOrderResults* SimulatedAnnealingOrder::perform_order_selection(void)
{
    SimulatedAnnealingOrderResults* results = new SimulatedAnnealingOrderResults();

    NeuralNetwork* neural_network_pointer = training_strategy_pointer->get_performance_functional_pointer()->get_neural_network_pointer();
    MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    size_t optimal_order, current_order;
    Vector<double> optimum_performance(2);        // [0] = training, [1] = selection (see assignments below)
    Vector<double> current_order_performance(2);  // same layout as optimum_performance
    Vector<double> optimum_parameters, current_parameters;

    double current_training_performance, current_selection_performance;

    bool end = false;
    size_t iterations = 0;
    size_t random_failures = 0;  // consecutive draws that repeated the current optimum
    size_t upper_bound;
    size_t lower_bound;

    time_t beginning_time, current_time;
    double elapsed_time;

    double temperature;
    double boltzmann_probability;
    double random_uniform;

    if(display)
    {
        std::cout << "Performing order selection with simulated annealing method..." << std::endl;
    }

    time(&beginning_time);

    // Random initial order, uniform in [minimum_order, maximum_order].
    optimal_order = (size_t)(minimum_order +
                             calculate_random_uniform(0.,1.)*(maximum_order - minimum_order));
    optimum_performance = perform_model_evaluation(optimal_order);
    optimum_parameters = get_parameters_order(optimal_order);

    current_training_performance = optimum_performance[0];
    current_selection_performance = optimum_performance[1];

    // Initial temperature: the initial selection performance itself.
    // NOTE(review): if the initial selection performance is <= minimum_temperature
    // the loop still executes one full iteration before the stopping check fires.
    temperature = current_selection_performance;

    results->order_data.push_back(optimal_order);

    if(reserve_performance_data)
    {
        results->performance_data.push_back(current_training_performance);
    }

    if(reserve_selection_performance_data)
    {
        results->selection_performance_data.push_back(current_selection_performance);
    }

    if(reserve_parameters_data)
    {
        results->parameters_data.push_back(optimum_parameters);
    }

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if(display)
    {
        std::cout << "Initial values : " << std::endl;
        std::cout << "Hidden perceptrons : " << optimal_order << std::endl;
        std::cout << "Final selection performance : " << optimum_performance[1] << std::endl;
        std::cout << "Final Training Performance : " << optimum_performance[0] << std::endl;
        std::cout << "Temperature : " << temperature << std::endl;
        std::cout << "Elapsed time : " << elapsed_time << std::endl;
    }

    while (!end){

        // Neighbourhood window of width (maximum_order-minimum_order)/3 around the
        // optimum, clamped to [minimum_order, maximum_order].  The explicit branch
        // for the lower bound avoids size_t underflow when the optimum is small.
        upper_bound = std::min(maximum_order, optimal_order + (maximum_order-minimum_order)/3);
        if(optimal_order <= (maximum_order-minimum_order)/3)
        {
            lower_bound = minimum_order;
        }else
        {
            lower_bound = optimal_order - (maximum_order-minimum_order)/3;
        }

        // Redraw until the candidate differs from the current optimum; after 5
        // failed draws fall back to a deterministic +/-1 step.
        // NOTE(review): if minimum_order == maximum_order both fallbacks are
        // skipped and this loop cannot terminate — confirm callers guarantee
        // minimum_order < maximum_order.
        current_order = (size_t)(lower_bound + calculate_random_uniform(0.,1.)*(upper_bound - lower_bound));
        while (current_order == optimal_order)
        {
            current_order = (size_t)(lower_bound + calculate_random_uniform(0.,1.)*(upper_bound - lower_bound));
            random_failures++;

            if(random_failures >= 5 && optimal_order != minimum_order)
            {
                current_order = optimal_order - 1;
            }else if(random_failures >= 5 && optimal_order != maximum_order)
            {
                current_order = optimal_order + 1;
            }
        }

        random_failures = 0;

        current_order_performance = perform_model_evaluation(current_order);
        current_training_performance = current_order_performance[0];
        current_selection_performance = current_order_performance[1];
        current_parameters = get_parameters_order(current_order);

        // Metropolis acceptance rule: improvements (negative delta) always give
        // probability 1; worse candidates are accepted with exp(-delta/temperature).
        boltzmann_probability = std::min(1.0, exp(-(current_selection_performance-optimum_performance[1])/temperature));
        random_uniform = calculate_random_uniform(0.,1.);

        if(boltzmann_probability > random_uniform)
        {
            optimal_order = current_order;
            optimum_performance = current_order_performance;
            optimum_parameters = get_parameters_order(optimal_order);
        }

        time(&current_time);
        elapsed_time = difftime(current_time, beginning_time);

        results->order_data.push_back(current_order);

        if(reserve_performance_data)
        {
            results->performance_data.push_back(current_training_performance);
        }

        if(reserve_selection_performance_data)
        {
            results->selection_performance_data.push_back(current_selection_performance);
        }

        if(reserve_parameters_data)
        {
            results->parameters_data.push_back(current_parameters);
        }

        // Geometric cooling schedule.
        temperature = cooling_rate*temperature;

        iterations++;

        // Stopping criteria

        if(temperature <= minimum_temperature)
        {
            end = true;

            if(display)
            {
                std::cout << "Minimum temperature reached." << std::endl;
            }

            results->stopping_condition = SimulatedAnnealingOrder::MinimumTemperature;
        }else if(elapsed_time > maximum_time)
        {
            end = true;

            if(display)
            {
                std::cout << "Maximum time reached." << std::endl;
            }

            results->stopping_condition = SimulatedAnnealingOrder::MaximumTime;
        }else if(optimum_performance[1] <= selection_performance_goal)
        {
            end = true;

            if(display)
            {
                std::cout << "Selection performance reached." << std::endl;
            }

            results->stopping_condition = SimulatedAnnealingOrder::SelectionPerformanceGoal;
        }else if(iterations >= maximum_iterations_number)
        {
            end = true;

            if(display)
            {
                std::cout << "Maximum number of iterations reached." << std::endl;
            }

            results->stopping_condition = SimulatedAnnealingOrder::MaximumIterations;
        }

        if(display)
        {
            std::cout << "Iteration : " << iterations << std::endl;
            std::cout << "Hidden neurons number : " << optimal_order << std::endl;
            std::cout << "Selection performance : " << optimum_performance[1] << std::endl;
            std::cout << "Training performance : " << optimum_performance[0] << std::endl;
            std::cout << "Current temperature : " << temperature << std::endl;
            std::cout << "Elapsed time : " << elapsed_time << std::endl;
        }
    }

    // Recover the best visited order from the recorded histories (presumably the
    // entry with the best selection performance, per the helper's name), not
    // necessarily the last accepted annealing state.
    size_t optimal_index = get_optimal_selection_performance_index();

    optimal_order = order_history[optimal_index] ;
    optimum_performance[0] = performance_history[optimal_index];
    optimum_performance[1] = selection_performance_history[optimal_index];
    optimum_parameters = get_parameters_order(optimal_order);

    if(display)
    {
        std::cout << "Optimal order : " << optimal_order << std::endl;
        std::cout << "Optimum selection performance : " << optimum_performance[1] << std::endl;
        std::cout << "Corresponding training performance : " << optimum_performance[0] << std::endl;
    }

    // Resize the last hidden layer in place to the optimal order: grow by the
    // missing amount, or prune one perceptron at a time down to the target.
    const size_t last_hidden_layer = multilayer_perceptron_pointer->get_layers_number()-2;
    const size_t perceptrons_number = multilayer_perceptron_pointer->get_layer_pointer(last_hidden_layer)->get_perceptrons_number();

    if(optimal_order > perceptrons_number)
    {
        multilayer_perceptron_pointer->grow_layer_perceptron(last_hidden_layer,optimal_order-perceptrons_number);
    }else
    {
        for (size_t i = 0; i < (perceptrons_number-optimal_order); i++)
        {
            multilayer_perceptron_pointer->prune_layer_perceptron(last_hidden_layer,0);
        }
    }

    multilayer_perceptron_pointer->set_parameters(optimum_parameters);

    if(reserve_minimal_parameters)
    {
        results->minimal_parameters = optimum_parameters;
    }

    results->optimal_order = optimal_order;
    results->final_performance = optimum_performance[0];
    results->final_selection_performance = optimum_performance[1];
    results->elapsed_time = elapsed_time;
    results->iterations_number = iterations;

    return(results);
}
/// Golden-section search over the number of hidden perceptrons.
///
/// Keeps a bracket [a, b] with two interior probes ln and mu placed at the
/// golden-ratio points a + 0.382*(b-a) and a + 0.618*(b-a).  Each iteration
/// shrinks the bracket toward the probe with the better (lower, or equal
/// within tolerance) generalization performance and evaluates the one fresh
/// probe.  When the probes coincide or cross, the best of {a, ln, mu, b} by
/// generalization performance is chosen and the multilayer perceptron is
/// rebuilt with that order.
///
/// Stopping criteria: bracket collapse (AlgorithmFinished), maximum_time,
/// selection_performance_goal and maximum_iterations_number.
///
/// Fix: the degenerate-bracket tests previously read
/// (ln == mu) || (ln > mu) || (mu < ln) — the third disjunct duplicates the
/// second.  Both tests now use the equivalent single comparison ln >= mu.
///
/// @return Newly heap-allocated results structure; the caller owns it.
GoldenSectionOrder::GoldenSectionOrderResults* GoldenSectionOrder::perform_order_selection(void)
{
    GoldenSectionOrderResults* results = new GoldenSectionOrderResults();

    NeuralNetwork* neural_network_pointer = training_strategy_pointer->get_performance_functional_pointer()->get_neural_network_pointer();
    MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    // [0] = training performance, [1] = generalization (selection) performance.
    Vector<double> mu_performance(2);
    Vector<double> ln_performance(2);

    Vector<double> a_parameters;
    Vector<double> ln_parameters;
    Vector<double> mu_parameters;
    Vector<double> b_parameters;

    bool end = false;
    Vector<double> minimums(4);  // generalization performance at a, ln, mu, b
    double minimum;
    size_t iterations = 0;

    double current_training_performance, current_generalization_performance;

    time_t beginning_time, current_time;
    double elapsed_time;

    // Initial bracket and golden-ratio probes.
    size_t a = minimum_order;
    size_t b = maximum_order;
    size_t ln = (int)(a+(1.-0.618)*(b-a));
    size_t mu = (int)(a+0.618*(b-a));

    if (display)
        std::cout << "Performing order selection with golden section method..." << std::endl;

    time(&beginning_time);

    mu_performance = calculate_performances(mu);
    current_training_performance = mu_performance[0];
    current_generalization_performance = mu_performance[1];
    mu_parameters = get_parameters_order(mu);

    results->order_data.push_back(mu);

    if (reserve_performance_data)
    {
        results->performance_data.push_back(current_training_performance);
    }

    if (reserve_generalization_performance_data)
    {
        results->generalization_performance_data.push_back(current_generalization_performance);
    }

    if (reserve_parameters_data)
    {
        results->parameters_data.push_back(mu_parameters);
    }

    ln_performance = calculate_performances(ln);
    current_training_performance = ln_performance[0];
    current_generalization_performance = ln_performance[1];
    ln_parameters = get_parameters_order(ln);

    results->order_data.push_back(ln);

    if (reserve_performance_data)
    {
        results->performance_data.push_back(current_training_performance);
    }

    if (reserve_generalization_performance_data)
    {
        results->generalization_performance_data.push_back(current_generalization_performance);
    }

    if (reserve_parameters_data)
    {
        results->parameters_data.push_back(ln_parameters);
    }

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if (display)
    {
        std::cout << "Initial values : " << std::endl;
        std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
        std::cout << "ln final training performance : " << ln_performance[0] << std::endl;
        std::cout << "ln final generalization performance : " << ln_performance[1] << std::endl;
        std::cout << "mu final training performance : " << mu_performance[0] << std::endl;
        std::cout << "mu final generalization performance : " << mu_performance[1] << std::endl;
        std::cout << "Elapsed time : " << elapsed_time << std::endl;

    }

    // Degenerate initial bracket: the probes already coincide or have crossed.
    if (ln >= mu){
        end = true;
        if (display)
            std::cout << "Algorithm finished " << std::endl;
        results->stopping_condition = GoldenSectionOrder::AlgorithmFinished;
    }

    while(!end){

        if (ln_performance[1] < mu_performance[1]
        || fabs(ln_performance[1] - mu_performance[1]) < tolerance)
        {
            // ln is at least as good: shrink the bracket to [a, mu] and probe a new ln.
            b = mu;
            mu = ln;
            mu_performance = ln_performance;
            ln = (int)(a+(1.-0.618)*(b-a));

            ln_performance = calculate_performances(ln);
            current_training_performance = ln_performance[0];
            current_generalization_performance = ln_performance[1];
            ln_parameters = get_parameters_order(ln);

            results->order_data.push_back(ln);

            if (reserve_performance_data)
            {
                results->performance_data.push_back(current_training_performance);
            }

            if (reserve_generalization_performance_data)
            {
                results->generalization_performance_data.push_back(current_generalization_performance);
            }

            if (reserve_parameters_data)
            {
                results->parameters_data.push_back(ln_parameters);
            }

        }else
        {
            // mu is strictly better: shrink the bracket to [ln, b] and probe a new mu.
            a = ln;
            ln = mu;
            ln_performance = mu_performance;
            mu = (int)(a+0.618*(b-a));

            mu_performance = calculate_performances(mu);
            current_training_performance = mu_performance[0];
            current_generalization_performance = mu_performance[1];
            mu_parameters = get_parameters_order(mu);

            results->order_data.push_back(mu);

            if (reserve_performance_data)
            {
                results->performance_data.push_back(current_training_performance);
            }

            if (reserve_generalization_performance_data)
            {
                results->generalization_performance_data.push_back(current_generalization_performance);
            }

            if (reserve_parameters_data)
            {
                results->parameters_data.push_back(mu_parameters);
            }

        }

        time(&current_time);
        elapsed_time = difftime(current_time, beginning_time);

        iterations++;

        // Stopping criteria

        if (ln >= mu){  // probes met or crossed: bracket has collapsed
            end = true;
            if (display)
                std::cout << "Algorithm finished " << std::endl;
            results->stopping_condition = GoldenSectionOrder::AlgorithmFinished;
        }else if (elapsed_time > maximum_time)
        {
            end = true;
            if (display)
                std::cout << "Maximum time reached." << std::endl;
            results->stopping_condition = GoldenSectionOrder::MaximumTime;
        }else if (fmin(ln_performance[1],mu_performance[1]) < selection_performance_goal)
        {
            end = true;
            if (display)
                std::cout << "Generalization performance reached." << std::endl;
            results->stopping_condition = GoldenSectionOrder::SelectionPerformanceGoal;
        }else if (iterations > maximum_iterations_number)
        {
            end = true;
            if (display)
                std::cout << "Maximum number of iterations reached." << std::endl;
            results->stopping_condition = GoldenSectionOrder::MaximumIterations;
        }

        if (display && !end)
        {

            std::cout << "Iteration : " << iterations << std::endl;
            std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
            std::cout << "ln final training performance : " << ln_performance[0] << std::endl;
            std::cout << "ln final generalization performance : " << ln_performance[1] << std::endl;
            std::cout << "mu final training performance : " << mu_performance[0] << std::endl;
            std::cout << "mu final generalization performance : " << mu_performance[1] << std::endl;
            std::cout << "Elapsed time : " << elapsed_time << std::endl;
        }
    }

    // Evaluate the four bracket points and pick the best generalization performance.
    // NOTE(review): calculate_performances() appears to retrain the model; these
    // repeated calls (also in the display block and in the final_performance
    // assignments below, after set_parameters) are expensive and may not
    // reproduce the recorded values — confirm against the helper's definition.
    minimums[0] = calculate_performances(a)[1];
    a_parameters = get_parameters_order(a);

    minimums[1] = calculate_performances(ln)[1];
    ln_parameters = get_parameters_order(ln);

    minimums[2] = calculate_performances(mu)[1];
    mu_parameters = get_parameters_order(mu);

    minimums[3] = calculate_performances(b)[1];
    b_parameters = get_parameters_order(b);

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if (display)
    {
        std::cout << "Iteration : " << iterations << std::endl;
        std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
        std::cout << "a final training performance : " << calculate_performances(a)[0] << std::endl;
        std::cout << "a final generalization performance : " << calculate_performances(a)[1] << std::endl;
        std::cout << "ln final training performance : " << ln_performance[0] << std::endl;
        std::cout << "ln final generalization performance : " << ln_performance[1] << std::endl;
        std::cout << "mu final training performance : " << mu_performance[0] << std::endl;
        std::cout << "mu final generalization performance : " << mu_performance[1] << std::endl;
        std::cout << "b final training performance : " << calculate_performances(b)[0] << std::endl;
        std::cout << "b final generalization performance : " << calculate_performances(b)[1] << std::endl;
        std::cout << "Elapsed time : " << elapsed_time << std::endl;
    }

    minimum = minimums.calculate_minimum();

    // Rebuild the network with the winning order and its stored parameters.
    if (fabs(minimums[0] - minimum) < tolerance)
    {
        if (display)
            std::cout << "Optimal order : " << a << std::endl;

        multilayer_perceptron_pointer->set(inputs_number, a, outputs_number);
        multilayer_perceptron_pointer->set_parameters(a_parameters);

        if (reserve_minimal_parameters)
            results->minimal_parameters = a_parameters;

        results->optimal_order = a;
        results->final_generalization_performance = minimums[0];
        results->final_performance = calculate_performances(a)[0];

    }else if (fabs(minimums[1] - minimum) < tolerance)
    {
        if (display)
            std::cout << "Optimal order : " << ln << std::endl;

        multilayer_perceptron_pointer->set(inputs_number, ln, outputs_number);
        multilayer_perceptron_pointer->set_parameters(ln_parameters);

        if (reserve_minimal_parameters)
            results->minimal_parameters = ln_parameters;

        results->optimal_order = ln;
        results->final_generalization_performance = minimums[1];
        results->final_performance = calculate_performances(ln)[0];

    }else if(fabs(minimums[2] - minimum) < tolerance)
    {
        if (display)
            std::cout << "Optimal order : " << mu << std::endl;

        multilayer_perceptron_pointer->set(inputs_number, mu, outputs_number);
        multilayer_perceptron_pointer->set_parameters(mu_parameters);

        if (reserve_minimal_parameters)
            results->minimal_parameters = mu_parameters;

        results->optimal_order = mu;
        results->final_generalization_performance = minimums[2];
        results->final_performance = calculate_performances(mu)[0];

    }else
    {
        if (display)
            std::cout << "Optimal order : " << b << std::endl;

        multilayer_perceptron_pointer->set(inputs_number, b, outputs_number);
        multilayer_perceptron_pointer->set_parameters(b_parameters);

        if (reserve_minimal_parameters)
            results->minimal_parameters = b_parameters;

        results->optimal_order = b;
        results->final_generalization_performance = minimums[3];
        results->final_performance = calculate_performances(b)[0];

    }

    results->elapsed_time = elapsed_time;
    results->iterations_number = iterations;

    return(results);
}
// Example #3 (0)
/// Incremental (growing) order selection: evaluates orders from
/// minimum_order upward in increments of `step` (clamped to maximum_order),
/// tracking the order with the best generalization performance seen so far
/// and counting consecutive worsenings as selection failures.
///
/// Stopping criteria: maximum_time, selection_performance_goal,
/// maximum_iterations_number, maximum_selection_failures, or reaching
/// maximum_order (AlgorithmFinished).
///
/// Fix: final_performance previously called calculate_performances() again
/// AFTER set()/set_parameters(); since that helper re-evaluates (re-trains)
/// the network, it clobbered the parameters just installed and reported a
/// value inconsistent with the recorded optimum.  The training performance
/// observed when the optimum was found is now stored and reported instead.
///
/// @return Newly heap-allocated results structure; the caller owns it.
IncrementalOrder::IncrementalOrderResults* IncrementalOrder::perform_order_selection(void)
{
    IncrementalOrderResults* results = new IncrementalOrderResults();

    NeuralNetwork* neural_network_pointer = training_strategy_pointer->get_performance_functional_pointer()->get_neural_network_pointer();
    MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    Vector<double> performance(2);  // [0] = training, [1] = generalization
    double prev_generalization_performance = 1.0e99;

    // Defensive initialization; all three optimum trackers are overwritten on
    // the first iteration (the iterations == 0 branch below always takes them).
    size_t optimal_order = minimum_order;
    Vector<double> optimum_parameters;
    double optimum_generalization_performance = 0.0;
    double optimum_training_performance = 0.0;

    Vector<double> parameters_history_row;
    double current_training_performance, current_generalization_performance;

    size_t order = minimum_order;
    size_t iterations = 0;
    size_t selection_failures = 0;

    bool end = false;

    time_t beginning_time, current_time;
    double elapsed_time;

    if (display)
        std::cout << "Performing Incremental order selection..." << std::endl;

    time(&beginning_time);

    while (!end)
    {
        performance = calculate_performances(order);
        current_training_performance = performance[0];
        current_generalization_performance = performance[1];

        time(&current_time);
        elapsed_time = difftime(current_time, beginning_time);

        results->order_data.push_back(order);

        if (reserve_performance_data)
        {
            results->performance_data.push_back(current_training_performance);
        }

        if (reserve_generalization_performance_data)
        {
            results->generalization_performance_data.push_back(current_generalization_performance);
        }

        if (reserve_parameters_data)
        {
            parameters_history_row = get_parameters_order(order);
            results->parameters_data.push_back(parameters_history_row);
        }

        // New optimum when this order improves the best generalization
        // performance by more than the tolerance (or on the first iteration).
        if (iterations == 0
        || (optimum_generalization_performance > current_generalization_performance
        && fabs(optimum_generalization_performance - current_generalization_performance) > tolerance))
        {
            optimal_order = order;
            optimum_generalization_performance = current_generalization_performance;
            optimum_training_performance = current_training_performance;
            optimum_parameters = get_parameters_order(optimal_order);
        }else if (prev_generalization_performance < current_generalization_performance)
            selection_failures++;

        prev_generalization_performance = current_generalization_performance;
        iterations++;

        // Stopping criteria

        if (elapsed_time > maximum_time)
        {
            end = true;
            if (display)
                std::cout << "Maximum time reached." << std::endl;
            results->stopping_condition = IncrementalOrder::MaximumTime;
        }else if (performance[1] < selection_performance_goal)
        {
            end = true;
            if (display)
                std::cout << "Generalization performance reached." << std::endl;
            results->stopping_condition = IncrementalOrder::SelectionPerformanceGoal;
        }else if (iterations > maximum_iterations_number)
        {
            end = true;
            if (display)
                std::cout << "Maximum number of iterations reached." << std::endl;
            results->stopping_condition = IncrementalOrder::MaximumIterations;
        }else if (selection_failures >= maximum_selection_failures)
        {
            end = true;
            if (display)
                std::cout << "Maximum generalization performance failures("<<selection_failures<<") reached." << std::endl;
            results->stopping_condition = IncrementalOrder::MaximumSelectionFailures;
        }else if (order == maximum_order)
        {
            end = true;
            if (display)
                std::cout << "Algorithm finished" << std::endl;
            results->stopping_condition = IncrementalOrder::AlgorithmFinished;
        }

        if (display)
        {
            std::cout << "Iteration : " << iterations << std::endl;
            std::cout << "Hidden Perceptron Number : " << order << std::endl;
            std::cout << "Final Training Performance : " << performance[0] << std::endl;
            std::cout << "Final Generalization Performance : " << performance[1] << std::endl;
            std::cout << "Elapsed time : " << elapsed_time << std::endl;
        }

        // Grow by `step`, clamped so the last probe is exactly maximum_order.
        if (!end)
            order = std::min(maximum_order, order+step);
    }

    if (display)
        std::cout << "Optimal order : " << optimal_order << std::endl;

    // Rebuild the network with the winning order and its stored parameters.
    multilayer_perceptron_pointer->set(inputs_number, optimal_order, outputs_number);
    multilayer_perceptron_pointer->set_parameters(optimum_parameters);

    if (reserve_minimal_parameters)
        results->minimal_parameters = optimum_parameters;

    results->optimal_order = optimal_order;
    results->final_generalization_performance = optimum_generalization_performance;
    // Report the training performance recorded when the optimum was found;
    // re-evaluating here would re-train and overwrite the parameters set above.
    results->final_performance = optimum_training_performance;
    results->iterations_number = iterations;
    results->elapsed_time = elapsed_time;

    return(results);
}
/// Golden-section search over the number of hidden perceptrons (loss-index
/// variant).
///
/// Keeps a bracket [a, b] with two interior probes ln and mu at the
/// golden-ratio points a + 0.382*(b-a) and a + 0.618*(b-a).  Each iteration
/// shrinks the bracket toward the probe with the better (lower, or equal
/// within tolerance) selection loss and evaluates the one fresh probe.
/// When the probes coincide or cross, the best of {a, ln, mu, b} by
/// selection loss is chosen and the last hidden layer is grown/pruned to
/// that order.
///
/// Stopping criteria: bracket collapse (AlgorithmFinished), maximum_time,
/// selection_loss_goal and maximum_iterations_number.
///
/// Fix: the degenerate-bracket tests previously read
/// (ln == mu) || (ln > mu) || (mu < ln) — the third disjunct duplicates the
/// second.  Both tests now use the equivalent single comparison ln >= mu.
///
/// @return Newly heap-allocated results structure; the caller owns it.
GoldenSectionOrder::GoldenSectionOrderResults* GoldenSectionOrder::perform_order_selection(void)
{
    GoldenSectionOrderResults* results = new GoldenSectionOrderResults();

    NeuralNetwork* neural_network_pointer = training_strategy_pointer->get_loss_index_pointer()->get_neural_network_pointer();
    MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    // [0] = training loss, [1] = selection loss.
    Vector<double> mu_loss(2);
    Vector<double> ln_loss(2);

    Vector<double> a_parameters;
    Vector<double> ln_parameters;
    Vector<double> mu_parameters;
    Vector<double> b_parameters;

    size_t optimal_order;
    Vector<double> optimum_parameters;
    Vector<double> optimum_loss(2);

    bool end = false;
    Vector<double> minimums(4);  // selection loss at a, ln, mu, b
    double minimum;
    size_t iterations = 0;

    double current_training_loss, current_selection_loss;

    time_t beginning_time, current_time;
    double elapsed_time;

    // Initial bracket and golden-ratio probes.
    size_t a = minimum_order;
    size_t b = maximum_order;
    size_t ln = (int)(a+(1.-0.618)*(b-a));
    size_t mu = (int)(a+0.618*(b-a));

    if(display)
    {
        std::cout << "Performing order selection with golden section method..." << std::endl;
    }

    time(&beginning_time);

    mu_loss = perform_model_evaluation(mu);
    current_training_loss = mu_loss[0];
    current_selection_loss = mu_loss[1];
    mu_parameters = get_parameters_order(mu);

    results->order_data.push_back(mu);

    if(reserve_loss_data)
    {
        results->loss_data.push_back(current_training_loss);
    }

    if(reserve_selection_loss_data)
    {
        results->selection_loss_data.push_back(current_selection_loss);
    }

    if(reserve_parameters_data)
    {
        results->parameters_data.push_back(mu_parameters);
    }

    ln_loss = perform_model_evaluation(ln);
    current_training_loss = ln_loss[0];
    current_selection_loss = ln_loss[1];
    ln_parameters = get_parameters_order(ln);

    results->order_data.push_back(ln);

    if(reserve_loss_data)
    {
        results->loss_data.push_back(current_training_loss);
    }

    if(reserve_selection_loss_data)
    {
        results->selection_loss_data.push_back(current_selection_loss);
    }

    if(reserve_parameters_data)
    {
        results->parameters_data.push_back(ln_parameters);
    }

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if(display)
    {
        std::cout << "Initial values: " << std::endl;
        std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
        std::cout << "ln final training loss: " << ln_loss[0] << std::endl;
        std::cout << "ln final selection loss: " << ln_loss[1] << std::endl;
        std::cout << "mu final training loss: " << mu_loss[0] << std::endl;
        std::cout << "mu final selection loss: " << mu_loss[1] << std::endl;
        std::cout << "Elapsed time: " << elapsed_time << std::endl;
    }

    // Degenerate initial bracket: the probes already coincide or have crossed.
    if(ln >= mu)
    {
        end = true;

        if(display)
        {
            std::cout << "Algorithm finished " << std::endl;
        }

        results->stopping_condition = GoldenSectionOrder::AlgorithmFinished;
    }

    while(!end){

        if(ln_loss[1] < mu_loss[1]
        || fabs(ln_loss[1] - mu_loss[1]) < tolerance)
        {
            // ln is at least as good: shrink the bracket to [a, mu] and probe a new ln.
            b = mu;
            mu = ln;
            mu_loss = ln_loss;
            ln = (int)(a+(1.-0.618)*(b-a));

            ln_loss = perform_model_evaluation(ln);
            current_training_loss = ln_loss[0];
            current_selection_loss = ln_loss[1];
            ln_parameters = get_parameters_order(ln);

            results->order_data.push_back(ln);

            if(reserve_loss_data)
            {
                results->loss_data.push_back(current_training_loss);
            }

            if(reserve_selection_loss_data)
            {
                results->selection_loss_data.push_back(current_selection_loss);
            }

            if(reserve_parameters_data)
            {
                results->parameters_data.push_back(ln_parameters);
            }

        }
        else
        {
            // mu is strictly better: shrink the bracket to [ln, b] and probe a new mu.
            a = ln;
            ln = mu;
            ln_loss = mu_loss;
            mu = (int)(a+0.618*(b-a));

            mu_loss = perform_model_evaluation(mu);
            current_training_loss = mu_loss[0];
            current_selection_loss = mu_loss[1];
            mu_parameters = get_parameters_order(mu);

            results->order_data.push_back(mu);

            if(reserve_loss_data)
            {
                results->loss_data.push_back(current_training_loss);
            }

            if(reserve_selection_loss_data)
            {
                results->selection_loss_data.push_back(current_selection_loss);
            }

            if(reserve_parameters_data)
            {
                results->parameters_data.push_back(mu_parameters);
            }

        }

        time(&current_time);
        elapsed_time = difftime(current_time, beginning_time);

        iterations++;

        // Stopping criteria

        if(ln >= mu)  // probes met or crossed: bracket has collapsed
        {
            end = true;

            if(display)
            {
                std::cout << "Algorithm finished " << std::endl;
            }

            results->stopping_condition = GoldenSectionOrder::AlgorithmFinished;
        }
        else if(elapsed_time >= maximum_time)
        {
            end = true;

            if(display)
            {
                std::cout << "Maximum time reached." << std::endl;
            }

            results->stopping_condition = GoldenSectionOrder::MaximumTime;
        }
        else if(std::min(ln_loss[1],mu_loss[1]) <= selection_loss_goal)
        {
            end = true;

            if(display)
            {
                std::cout << "Selection loss reached." << std::endl;
            }

            results->stopping_condition = GoldenSectionOrder::SelectionLossGoal;
        }
        else if(iterations >= maximum_iterations_number)
        {
            end = true;

            if(display)
            {
                std::cout << "Maximum number of iterations reached." << std::endl;
            }

            results->stopping_condition = GoldenSectionOrder::MaximumIterations;
        }

        if(display && !end)
        {

            std::cout << "Iteration: " << iterations << std::endl;
            std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
            std::cout << "ln final training loss: " << ln_loss[0] << std::endl;
            std::cout << "ln final selection loss: " << ln_loss[1] << std::endl;
            std::cout << "mu final training loss: " << mu_loss[0] << std::endl;
            std::cout << "mu final selection loss: " << mu_loss[1] << std::endl;
            std::cout << "Elapsed time: " << elapsed_time << std::endl;
        }
    }

    // Evaluate the four bracket points and pick the best selection loss.
    // NOTE(review): perform_model_evaluation() appears to retrain the model;
    // the repeated calls here and in the display block below are expensive and
    // may not reproduce the recorded values — confirm against its definition.
    minimums[0] = perform_model_evaluation(a)[1];
    a_parameters = get_parameters_order(a);

    minimums[1] = perform_model_evaluation(ln)[1];
    ln_parameters = get_parameters_order(ln);

    minimums[2] = perform_model_evaluation(mu)[1];
    mu_parameters = get_parameters_order(mu);

    minimums[3] = perform_model_evaluation(b)[1];
    b_parameters = get_parameters_order(b);

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if(display)
    {
        std::cout << "Iteration: " << iterations << std::endl;
        std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
        std::cout << "a final training loss: " << perform_model_evaluation(a)[0] << std::endl;
        std::cout << "a final selection loss: " << perform_model_evaluation(a)[1] << std::endl;
        std::cout << "ln final training loss: " << ln_loss[0] << std::endl;
        std::cout << "ln final selection loss: " << ln_loss[1] << std::endl;
        std::cout << "mu final training loss: " << mu_loss[0] << std::endl;
        std::cout << "mu final selection loss: " << mu_loss[1] << std::endl;
        std::cout << "b final training loss: " << perform_model_evaluation(b)[0] << std::endl;
        std::cout << "b final selection loss: " << perform_model_evaluation(b)[1] << std::endl;
        std::cout << "Elapsed time: " << elapsed_time << std::endl;
    }

    minimum = minimums.calculate_minimum();

    if(fabs(minimums[0] - minimum) < tolerance)
    {
        optimal_order = a;

        optimum_parameters = a_parameters;

        optimum_loss[0] = perform_model_evaluation(a)[0];
        optimum_loss[1] = minimums[0];

    }
    else if(fabs(minimums[1] - minimum) < tolerance)
    {
        optimal_order = ln;

        optimum_parameters = ln_parameters;

        optimum_loss[0] = perform_model_evaluation(ln)[0];
        optimum_loss[1] = minimums[1];

    }
    else if(fabs(minimums[2] - minimum) < tolerance)
    {
        optimal_order = mu;

        optimum_parameters = mu_parameters;

        optimum_loss[0] = perform_model_evaluation(mu)[0];
        optimum_loss[1] = minimums[2];
    }
    else
    {
        optimal_order = b;

        optimum_parameters = b_parameters;

        optimum_loss[0] = perform_model_evaluation(b)[0];
        optimum_loss[1] = minimums[3];
    }

    if(display)
    {
        std::cout << "Optimal order: " << optimal_order << std::endl;
    }

    // Resize the last hidden layer in place to the optimal order: grow by the
    // missing amount, or prune one perceptron at a time down to the target.
    const size_t last_hidden_layer = multilayer_perceptron_pointer->get_layers_number()-2;
    const size_t perceptrons_number = multilayer_perceptron_pointer->get_layer_pointer(last_hidden_layer)->get_perceptrons_number();

    if(optimal_order > perceptrons_number)
    {
        multilayer_perceptron_pointer->grow_layer_perceptron(last_hidden_layer,optimal_order-perceptrons_number);
    }
    else
    {
        for (size_t i = 0; i < (perceptrons_number-optimal_order); i++)
            multilayer_perceptron_pointer->prune_layer_perceptron(last_hidden_layer,0);
    }

    multilayer_perceptron_pointer->set_parameters(optimum_parameters);

#ifdef __OPENNN_MPI__
    neural_network_pointer->set_multilayer_perceptron_pointer(multilayer_perceptron_pointer);
#endif

    if(reserve_minimal_parameters)
    {
        results->minimal_parameters = optimum_parameters;
    }

    results->optimal_order = optimal_order;
    results->final_loss = optimum_loss[0];
    results->final_selection_loss = optimum_loss[1];
    results->elapsed_time = elapsed_time;
    results->iterations_number = iterations;

    return(results);
}