void Perceptron::initialize_bias_uniform(double minimum, double maximum)
{
   // Sanity check (only in debug builds)

   #ifdef _DEBUG 

   if(minimum > maximum)
   {
      std::cerr << "Flood Error: Perceptron class." << std::endl 
                << "initialize_bias_uniform(double, double) method." << std::endl
                << "Minimum value must be less than maximum value." << std::endl;

      exit(1);
   }

   #endif

   bias = calculate_random_uniform(minimum, maximum);
}
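The initializer above delegates the actual draw to calculate_random_uniform, which is not part of this listing. A minimal sketch of such a helper, assuming all it has to do is return a uniformly distributed double in [minimum, maximum], could look like this:

#include <random>

// Hypothetical stand-in for the calculate_random_uniform helper used above.
// Assumes a uniform draw in [minimum, maximum]; the real library routine may differ.
double calculate_random_uniform(double minimum, double maximum)
{
   static std::mt19937 generator(std::random_device{}());

   std::uniform_real_distribution<double> distribution(minimum, maximum);

   return distribution(generator);
}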
void Perceptron::initialize_bias_uniform(const double& minimum, const double& maximum)
{
   // Sanity check (only in debug builds)

   #ifdef __OPENNN_DEBUG__ 

   if(minimum > maximum)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Perceptron class.\n" 
             << "initialize_bias_uniform(const double&, const double&) method.\n"
             << "Minimum value must be less than maximum value.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   bias = calculate_random_uniform(minimum, maximum);
}
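This OpenNN variant reports the same precondition violation by throwing std::logic_error instead of calling exit(1), so the caller can recover from it. A hedged usage sketch follows; the perceptron.h header name and the default-constructed Perceptron are assumptions, and the range check only fires when __OPENNN_DEBUG__ is defined.

#include <iostream>
#include <stdexcept>

#include "perceptron.h"   // assumed header providing the Perceptron class

int main(void)
{
   Perceptron perceptron;                             // assumed default-constructible

   try
   {
      perceptron.initialize_bias_uniform(-1.0, 1.0);  // valid range: bias drawn uniformly

      perceptron.initialize_bias_uniform(1.0, -1.0);  // invalid range: throws in debug builds
   }
   catch(const std::logic_error& e)
   {
      std::cerr << e.what() << std::endl;
   }

   return(0);
}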
SimulatedAnnealingOrder::SimulatedAnnealingOrderResults* SimulatedAnnealingOrder::perform_order_selection(void)
{
    SimulatedAnnealingOrderResults* results = new SimulatedAnnealingOrderResults();

    NeuralNetwork* neural_network_pointer = training_strategy_pointer->get_performance_functional_pointer()->get_neural_network_pointer();
    MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    size_t optimal_order, current_order;
    Vector<double> optimum_performance(2);
    Vector<double> current_order_performance(2);
    Vector<double> optimum_parameters, current_parameters;

    double current_training_performance, current_selection_performance;

    bool end = false;
    size_t iterations = 0;
    size_t random_failures = 0;
    size_t upper_bound;
    size_t lower_bound;

    time_t beginning_time, current_time;
    double elapsed_time;

    double temperature;
    double boltzmann_probability;
    double random_uniform;

    if(display)
    {
        std::cout << "Performing order selection with simulated annealing method..." << std::endl;
    }

    time(&beginning_time);

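    // Draw a random initial order in [minimum_order, maximum_order] and evaluate it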
    optimal_order = (size_t)(minimum_order +
                             calculate_random_uniform(0.,1.)*(maximum_order - minimum_order));
    optimum_performance = perform_model_evaluation(optimal_order);
    optimum_parameters = get_parameters_order(optimal_order);

    current_training_performance = optimum_performance[0];
    current_selection_performance = optimum_performance[1];

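    // The initial selection performance also serves as the initial temperature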
    temperature = current_selection_performance;

    results->order_data.push_back(optimal_order);

    if(reserve_performance_data)
    {
        results->performance_data.push_back(current_training_performance);
    }

    if(reserve_selection_performance_data)
    {
        results->selection_performance_data.push_back(current_selection_performance);
    }

    if(reserve_parameters_data)
    {
        results->parameters_data.push_back(optimum_parameters);
    }

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if(display)
    {
        std::cout << "Initial values : " << std::endl;
        std::cout << "Hidden perceptrons : " << optimal_order << std::endl;
        std::cout << "Selection performance : " << optimum_performance[1] << std::endl;
        std::cout << "Training performance : " << optimum_performance[0] << std::endl;
        std::cout << "Temperature : " << temperature << std::endl;
        std::cout << "Elapsed time : " << elapsed_time << std::endl;
    }

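    // Main annealing loop: propose a neighbouring order, accept it with the Boltzmann probability, cool down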
    while(!end)
    {

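        // Restrict the neighbourhood to one third of the order range around the current optimum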
        upper_bound = std::min(maximum_order, optimal_order + (maximum_order-minimum_order)/3);
        if(optimal_order <= (maximum_order-minimum_order)/3)
        {
            lower_bound = minimum_order;
        }else
        {
            lower_bound = optimal_order - (maximum_order-minimum_order)/3;
        }

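        // Draw a candidate order different from the current optimum, nudging it by one after repeated collisions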
        current_order = (size_t)(lower_bound + calculate_random_uniform(0.,1.)*(upper_bound - lower_bound));
        while (current_order == optimal_order)
        {
            current_order = (size_t)(lower_bound + calculate_random_uniform(0.,1.)*(upper_bound - lower_bound));
            random_failures++;

            if(random_failures >= 5 && optimal_order != minimum_order)
            {
                current_order = optimal_order - 1;
            }else if(random_failures >= 5 && optimal_order != maximum_order)
            {
                current_order = optimal_order + 1;
            }
        }

        random_failures = 0;

        current_order_performance = perform_model_evaluation(current_order);
        current_training_performance = current_order_performance[0];
        current_selection_performance = current_order_performance[1];
        current_parameters = get_parameters_order(current_order);

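        // Boltzmann acceptance: improvements are always kept, worse selection performances only sometimes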
        boltzmann_probability = std::min(1.0, exp(-(current_selection_performance-optimum_performance[1])/temperature));
        random_uniform = calculate_random_uniform(0.,1.);

        if(boltzmann_probability > random_uniform)
        {
            optimal_order = current_order;
            optimum_performance = current_order_performance;
            optimum_parameters = get_parameters_order(optimal_order);
        }

        time(&current_time);
        elapsed_time = difftime(current_time, beginning_time);

        results->order_data.push_back(current_order);

        if(reserve_performance_data)
        {
            results->performance_data.push_back(current_training_performance);
        }

        if(reserve_selection_performance_data)
        {
            results->selection_performance_data.push_back(current_selection_performance);
        }

        if(reserve_parameters_data)
        {
            results->parameters_data.push_back(current_parameters);
        }

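        // Geometric cooling schedule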
        temperature = cooling_rate*temperature;

        iterations++;

        // Stopping criteria

        if(temperature <= minimum_temperature)
        {
            end = true;

            if(display)
            {
                std::cout << "Minimum temperature reached." << std::endl;
            }

            results->stopping_condition = SimulatedAnnealingOrder::MinimumTemperature;
        }else if(elapsed_time > maximum_time)
        {
            end = true;

            if(display)
            {
                std::cout << "Maximum time reached." << std::endl;
            }

            results->stopping_condition = SimulatedAnnealingOrder::MaximumTime;
        }else if(optimum_performance[1] <= selection_performance_goal)
        {
            end = true;

            if(display)
            {
                std::cout << "Selection performance reached." << std::endl;
            }

            results->stopping_condition = SimulatedAnnealingOrder::SelectionPerformanceGoal;
        }else if(iterations >= maximum_iterations_number)
        {
            end = true;

            if(display)
            {
                std::cout << "Maximum number of iterations reached." << std::endl;
            }

            results->stopping_condition = SimulatedAnnealingOrder::MaximumIterations;
        }

        if(display)
        {
            std::cout << "Iteration : " << iterations << std::endl;
            std::cout << "Hidden neurons number : " << optimal_order << std::endl;
            std::cout << "Selection performance : " << optimum_performance[1] << std::endl;
            std::cout << "Training performance : " << optimum_performance[0] << std::endl;
            std::cout << "Current temperature : " << temperature << std::endl;
            std::cout << "Elapsed time : " << elapsed_time << std::endl;
        }
    }

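    // Recover the order with the best selection performance seen during the search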
    size_t optimal_index = get_optimal_selection_performance_index();

    optimal_order = order_history[optimal_index];
    optimum_performance[0] = performance_history[optimal_index];
    optimum_performance[1] = selection_performance_history[optimal_index];
    optimum_parameters = get_parameters_order(optimal_order);

    if(display)
    {
        std::cout << "Optimal order : " << optimal_order << std::endl;
        std::cout << "Optimum selection performance : " << optimum_performance[1] << std::endl;
        std::cout << "Corresponding training performance : " << optimum_performance[0] << std::endl;
    }

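    // Resize the last hidden layer to the optimal order and restore the corresponding parameters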
    const size_t last_hidden_layer = multilayer_perceptron_pointer->get_layers_number()-2;
    const size_t perceptrons_number = multilayer_perceptron_pointer->get_layer_pointer(last_hidden_layer)->get_perceptrons_number();

    if(optimal_order > perceptrons_number)
    {
        multilayer_perceptron_pointer->grow_layer_perceptron(last_hidden_layer,optimal_order-perceptrons_number);
    }else
    {
        for (size_t i = 0; i < (perceptrons_number-optimal_order); i++)
        {
            multilayer_perceptron_pointer->prune_layer_perceptron(last_hidden_layer,0);
        }
    }

    multilayer_perceptron_pointer->set_parameters(optimum_parameters);

    if(reserve_minimal_parameters)
    {
        results->minimal_parameters = optimum_parameters;
    }

    results->optimal_order = optimal_order;
    results->final_performance = optimum_performance[0];
    results->final_selection_performance = optimum_performance[1];
    results->elapsed_time = elapsed_time;
    results->iterations_number = iterations;

    return(results);
}
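Stripped of the history bookkeeping and the extra stopping criteria, the loop above is a standard simulated annealing search: propose a neighbour, accept it with the Boltzmann probability, cool geometrically. A self-contained sketch of that core, where evaluate and propose_neighbour are hypothetical stand-ins for perform_model_evaluation and the neighbourhood draw, could be:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <functional>
#include <random>

// Minimal sketch of the annealing core used above; here "performance" means the
// selection performance, and lower values are better.
size_t anneal_order(size_t initial_order,
                    double cooling_rate,
                    double minimum_temperature,
                    const std::function<double(size_t)>& evaluate,
                    const std::function<size_t(size_t)>& propose_neighbour)
{
    std::mt19937 generator(std::random_device{}());
    std::uniform_real_distribution<double> uniform(0.0, 1.0);

    size_t optimal_order = initial_order;
    double optimal_performance = evaluate(optimal_order);

    double temperature = optimal_performance;        // initialized as in the method above

    while(temperature > minimum_temperature)
    {
        const size_t candidate_order = propose_neighbour(optimal_order);
        const double candidate_performance = evaluate(candidate_order);

        // Improvements are always accepted; worse candidates are accepted with a
        // probability that shrinks as the temperature drops.
        const double boltzmann_probability =
            std::min(1.0, std::exp(-(candidate_performance - optimal_performance)/temperature));

        if(boltzmann_probability > uniform(generator))
        {
            optimal_order = candidate_order;
            optimal_performance = candidate_performance;
        }

        temperature *= cooling_rate;                 // geometric cooling, as above
    }

    return(optimal_order);
}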