/// Selects the optimal order (number of perceptrons in the last hidden layer)
/// of the multilayer perceptron using the golden section method, minimizing
/// the selection loss over [minimum_order, maximum_order].
/// Side effects: resizes the last hidden layer of the network to the optimal
/// order and loads the corresponding parameters into it.
/// @return Pointer to a newly allocated GoldenSectionOrderResults structure;
/// ownership is transferred to the caller.

GoldenSectionOrder::GoldenSectionOrderResults* GoldenSectionOrder::perform_order_selection(void)
{
    GoldenSectionOrderResults* results = new GoldenSectionOrderResults();

    NeuralNetwork* neural_network_pointer = training_strategy_pointer->get_loss_index_pointer()->get_neural_network_pointer();
    MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    // Losses at the two interior golden-section points: [0] training, [1] selection.
    Vector<double> mu_loss(2);
    Vector<double> ln_loss(2);

    Vector<double> a_parameters;
    Vector<double> ln_parameters;
    Vector<double> mu_parameters;
    Vector<double> b_parameters;

    size_t optimal_order;
    Vector<double> optimum_parameters;
    Vector<double> optimum_loss(2);

    bool end = false;
    Vector<double> minimums(4);
    double minimum;
    size_t iterations = 0;

    double current_training_loss, current_selection_loss;

    time_t beginning_time, current_time;
    double elapsed_time;

    // Golden section bracket a < ln < mu < b, interior points at the golden
    // ratio (0.618) of the current interval.
    size_t a = minimum_order;
    size_t b = maximum_order;
    size_t ln = static_cast<size_t>(a+(1.-0.618)*(b-a));
    size_t mu = static_cast<size_t>(a+0.618*(b-a));

    if(display)
    {
        std::cout << "Performing order selection with golden section method..." << std::endl;
    }

    time(&beginning_time);

    // Evaluate and record the initial interior points.

    mu_loss = perform_model_evaluation(mu);
    current_training_loss = mu_loss[0];
    current_selection_loss = mu_loss[1];
    mu_parameters = get_parameters_order(mu);

    results->order_data.push_back(mu);

    if(reserve_loss_data)
    {
        results->loss_data.push_back(current_training_loss);
    }

    if(reserve_selection_loss_data)
    {
        results->selection_loss_data.push_back(current_selection_loss);
    }

    if(reserve_parameters_data)
    {
        results->parameters_data.push_back(mu_parameters);
    }

    ln_loss = perform_model_evaluation(ln);
    current_training_loss = ln_loss[0];
    current_selection_loss = ln_loss[1];
    ln_parameters = get_parameters_order(ln);

    results->order_data.push_back(ln);

    if(reserve_loss_data)
    {
        results->loss_data.push_back(current_training_loss);
    }

    if(reserve_selection_loss_data)
    {
        results->selection_loss_data.push_back(current_selection_loss);
    }

    if(reserve_parameters_data)
    {
        results->parameters_data.push_back(ln_parameters);
    }

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if(display)
    {
        std::cout << "Initial values: " << std::endl;
        std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
        std::cout << "ln final training loss: " << ln_loss[0] << std::endl;
        std::cout << "ln final selection loss: " << ln_loss[1] << std::endl;
        std::cout << "mu final training loss: " << mu_loss[0] << std::endl;
        std::cout << "mu final selection loss: " << mu_loss[1] << std::endl;
        std::cout << "Elapsed time: " << elapsed_time << std::endl;
    }

    // The interval is exhausted when the interior points meet or cross
    // (orders are integers, so this happens for small brackets).
    if(ln >= mu)
    {
        end = true;

        if(display)
        {
            std::cout << "Algorithm finished " << std::endl;
        }

        results->stopping_condition = GoldenSectionOrder::AlgorithmFinished;
    }

    while(!end){

        // Keep the half-interval containing the smaller selection loss
        // (ties within tolerance shrink towards smaller orders).
        if(ln_loss[1] < mu_loss[1]
        || fabs(ln_loss[1] - mu_loss[1]) < tolerance)
        {
            b = mu;
            mu = ln;
            mu_loss = ln_loss;
            ln = static_cast<size_t>(a+(1.-0.618)*(b-a));

            ln_loss = perform_model_evaluation(ln);
            current_training_loss = ln_loss[0];
            current_selection_loss = ln_loss[1];
            ln_parameters = get_parameters_order(ln);

            results->order_data.push_back(ln);

            if(reserve_loss_data)
            {
                results->loss_data.push_back(current_training_loss);
            }

            if(reserve_selection_loss_data)
            {
                results->selection_loss_data.push_back(current_selection_loss);
            }

            if(reserve_parameters_data)
            {
                results->parameters_data.push_back(ln_parameters);
            }

        }
        else
        {
            a = ln;
            ln = mu;
            ln_loss = mu_loss;
            mu = static_cast<size_t>(a+0.618*(b-a));

            mu_loss = perform_model_evaluation(mu);
            current_training_loss = mu_loss[0];
            current_selection_loss = mu_loss[1];
            mu_parameters = get_parameters_order(mu);

            results->order_data.push_back(mu);

            if(reserve_loss_data)
            {
                results->loss_data.push_back(current_training_loss);
            }

            if(reserve_selection_loss_data)
            {
                results->selection_loss_data.push_back(current_selection_loss);
            }

            if(reserve_parameters_data)
            {
                results->parameters_data.push_back(mu_parameters);
            }

        }

        time(&current_time);
        elapsed_time = difftime(current_time, beginning_time);

        iterations++;

        // Stopping criteria

        if(ln >= mu)
        {
            end = true;

            if(display)
            {
                std::cout << "Algorithm finished " << std::endl;
            }

            results->stopping_condition = GoldenSectionOrder::AlgorithmFinished;
        }
        else if(elapsed_time >= maximum_time)
        {
            end = true;

            if(display)
            {
                std::cout << "Maximum time reached." << std::endl;
            }

            results->stopping_condition = GoldenSectionOrder::MaximumTime;
        }
        else if(std::min(ln_loss[1],mu_loss[1]) <= selection_loss_goal)
        {
            end = true;

            if(display)
            {
                std::cout << "Selection loss reached." << std::endl;
            }

            results->stopping_condition = GoldenSectionOrder::SelectionLossGoal;
        }
        else if(iterations >= maximum_iterations_number)
        {
            end = true;

            if(display)
            {
                std::cout << "Maximum number of iterations reached." << std::endl;
            }

            results->stopping_condition = GoldenSectionOrder::MaximumIterations;
        }

        if(display && !end)
        {

            std::cout << "Iteration: " << iterations << std::endl;
            std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
            std::cout << "ln final training loss: " << ln_loss[0] << std::endl;
            std::cout << "ln final selection loss: " << ln_loss[1] << std::endl;
            std::cout << "mu final training loss: " << mu_loss[0] << std::endl;
            std::cout << "mu final selection loss: " << mu_loss[1] << std::endl;
            std::cout << "Elapsed time: " << elapsed_time << std::endl;
        }
    }

    // Evaluate each of the four bracket points exactly once and cache the
    // result: perform_model_evaluation trains the network, so repeating it
    // for display and for optimum selection (as the previous revision did)
    // is both expensive and potentially inconsistent.
    const Vector<double> a_final_loss = perform_model_evaluation(a);
    minimums[0] = a_final_loss[1];
    a_parameters = get_parameters_order(a);

    const Vector<double> ln_final_loss = perform_model_evaluation(ln);
    minimums[1] = ln_final_loss[1];
    ln_parameters = get_parameters_order(ln);

    const Vector<double> mu_final_loss = perform_model_evaluation(mu);
    minimums[2] = mu_final_loss[1];
    mu_parameters = get_parameters_order(mu);

    const Vector<double> b_final_loss = perform_model_evaluation(b);
    minimums[3] = b_final_loss[1];
    b_parameters = get_parameters_order(b);

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if(display)
    {
        std::cout << "Iteration: " << iterations << std::endl;
        std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
        std::cout << "a final training loss: " << a_final_loss[0] << std::endl;
        std::cout << "a final selection loss: " << a_final_loss[1] << std::endl;
        std::cout << "ln final training loss: " << ln_loss[0] << std::endl;
        std::cout << "ln final selection loss: " << ln_loss[1] << std::endl;
        std::cout << "mu final training loss: " << mu_loss[0] << std::endl;
        std::cout << "mu final selection loss: " << mu_loss[1] << std::endl;
        std::cout << "b final training loss: " << b_final_loss[0] << std::endl;
        std::cout << "b final selection loss: " << b_final_loss[1] << std::endl;
        std::cout << "Elapsed time: " << elapsed_time << std::endl;
    }

    minimum = minimums.calculate_minimum();

    // Pick the bracket point whose selection loss matches the minimum
    // (within tolerance); ties resolve in the order a, ln, mu, b.
    if(fabs(minimums[0] - minimum) < tolerance)
    {
        optimal_order = a;

        optimum_parameters = a_parameters;

        optimum_loss[0] = a_final_loss[0];
        optimum_loss[1] = minimums[0];

    }
    else if(fabs(minimums[1] - minimum) < tolerance)
    {
        optimal_order = ln;

        optimum_parameters = ln_parameters;

        optimum_loss[0] = ln_final_loss[0];
        optimum_loss[1] = minimums[1];

    }
    else if(fabs(minimums[2] - minimum) < tolerance)
    {
        optimal_order = mu;

        optimum_parameters = mu_parameters;

        optimum_loss[0] = mu_final_loss[0];
        optimum_loss[1] = minimums[2];
    }
    else
    {
        optimal_order = b;

        optimum_parameters = b_parameters;

        optimum_loss[0] = b_final_loss[0];
        optimum_loss[1] = minimums[3];
    }

    if(display)
    {
        std::cout << "Optimal order: " << optimal_order << std::endl;
    }

    // Resize the last hidden layer to the optimal order and load its parameters.
    const size_t last_hidden_layer = multilayer_perceptron_pointer->get_layers_number()-2;
    const size_t perceptrons_number = multilayer_perceptron_pointer->get_layer_pointer(last_hidden_layer)->get_perceptrons_number();

    if(optimal_order > perceptrons_number)
    {
        multilayer_perceptron_pointer->grow_layer_perceptron(last_hidden_layer,optimal_order-perceptrons_number);
    }
    else
    {
        for (size_t i = 0; i < (perceptrons_number-optimal_order); i++)
            multilayer_perceptron_pointer->prune_layer_perceptron(last_hidden_layer,0);
    }

    multilayer_perceptron_pointer->set_parameters(optimum_parameters);

#ifdef __OPENNN_MPI__
    neural_network_pointer->set_multilayer_perceptron_pointer(multilayer_perceptron_pointer);
#endif

    if(reserve_minimal_parameters)
    {
        results->minimal_parameters = optimum_parameters;
    }

    results->optimal_order = optimal_order;
    results->final_loss = optimum_loss[0];
    results->final_selection_loss = optimum_loss[1];
    results->elapsed_time = elapsed_time;
    results->iterations_number = iterations;

    return(results);
}
// Example #2
/// Selects the optimal order (hidden-layer size) of the multilayer perceptron
/// using the golden section method, minimizing the generalization performance
/// over [minimum_order, maximum_order].
/// Side effects: rebuilds the multilayer perceptron with the optimal order and
/// loads the corresponding parameters into it.
/// @return Pointer to a newly allocated GoldenSectionOrderResults structure;
/// ownership is transferred to the caller.

GoldenSectionOrder::GoldenSectionOrderResults* GoldenSectionOrder::perform_order_selection(void)
{
    GoldenSectionOrderResults* results = new GoldenSectionOrderResults();

    NeuralNetwork* neural_network_pointer = training_strategy_pointer->get_performance_functional_pointer()->get_neural_network_pointer();
    MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    // Performances at the two interior golden-section points:
    // [0] training, [1] generalization.
    Vector<double> mu_performance(2);
    Vector<double> ln_performance(2);

    Vector<double> a_parameters;
    Vector<double> ln_parameters;
    Vector<double> mu_parameters;
    Vector<double> b_parameters;

    bool end = false;
    Vector<double> minimums(4);
    double minimum;
    size_t iterations = 0;

    double current_training_performance, current_generalization_performance;

    time_t beginning_time, current_time;
    double elapsed_time;

    // Golden section bracket a < ln < mu < b, interior points at the golden
    // ratio (0.618) of the current interval.
    size_t a = minimum_order;
    size_t b = maximum_order;
    size_t ln = static_cast<size_t>(a+(1.-0.618)*(b-a));
    size_t mu = static_cast<size_t>(a+0.618*(b-a));

    if (display)
        std::cout << "Performing order selection with golden section method..." << std::endl;

    time(&beginning_time);

    // Evaluate and record the initial interior points.

    mu_performance = calculate_performances(mu);
    current_training_performance = mu_performance[0];
    current_generalization_performance = mu_performance[1];
    mu_parameters = get_parameters_order(mu);

    results->order_data.push_back(mu);

    if (reserve_performance_data)
    {
        results->performance_data.push_back(current_training_performance);
    }

    if (reserve_generalization_performance_data)
    {
        results->generalization_performance_data.push_back(current_generalization_performance);
    }

    if (reserve_parameters_data)
    {
        results->parameters_data.push_back(mu_parameters);
    }

    ln_performance = calculate_performances(ln);
    current_training_performance = ln_performance[0];
    current_generalization_performance = ln_performance[1];
    ln_parameters = get_parameters_order(ln);

    results->order_data.push_back(ln);

    if (reserve_performance_data)
    {
        results->performance_data.push_back(current_training_performance);
    }

    if (reserve_generalization_performance_data)
    {
        results->generalization_performance_data.push_back(current_generalization_performance);
    }

    if (reserve_parameters_data)
    {
        results->parameters_data.push_back(ln_parameters);
    }

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if (display)
    {
        std::cout << "Initial values : " << std::endl;
        std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
        std::cout << "ln final training performance : " << ln_performance[0] << std::endl;
        std::cout << "ln final generalization performance : " << ln_performance[1] << std::endl;
        std::cout << "mu final training performance : " << mu_performance[0] << std::endl;
        std::cout << "mu final generalization performance : " << mu_performance[1] << std::endl;
        std::cout << "Elapsed time : " << elapsed_time << std::endl;

    }

    // The interval is exhausted when the interior points meet or cross
    // (orders are integers, so this happens for small brackets).
    if (ln >= mu){
        end = true;
        if (display)
            std::cout << "Algorithm finished " << std::endl;
        results->stopping_condition = GoldenSectionOrder::AlgorithmFinished;
    }

    while(!end){

        // Keep the half-interval containing the smaller generalization
        // performance (ties within tolerance shrink towards smaller orders).
        if (ln_performance[1] < mu_performance[1]
        || fabs(ln_performance[1] - mu_performance[1]) < tolerance)
        {
            b = mu;
            mu = ln;
            mu_performance = ln_performance;
            ln = static_cast<size_t>(a+(1.-0.618)*(b-a));

            ln_performance = calculate_performances(ln);
            current_training_performance = ln_performance[0];
            current_generalization_performance = ln_performance[1];
            ln_parameters = get_parameters_order(ln);

            results->order_data.push_back(ln);

            if (reserve_performance_data)
            {
                results->performance_data.push_back(current_training_performance);
            }

            if (reserve_generalization_performance_data)
            {
                results->generalization_performance_data.push_back(current_generalization_performance);
            }

            if (reserve_parameters_data)
            {
                results->parameters_data.push_back(ln_parameters);
            }

        }else
        {
            a = ln;
            ln = mu;
            ln_performance = mu_performance;
            mu = static_cast<size_t>(a+0.618*(b-a));

            mu_performance = calculate_performances(mu);
            current_training_performance = mu_performance[0];
            current_generalization_performance = mu_performance[1];
            mu_parameters = get_parameters_order(mu);

            results->order_data.push_back(mu);

            if (reserve_performance_data)
            {
                results->performance_data.push_back(current_training_performance);
            }

            if (reserve_generalization_performance_data)
            {
                results->generalization_performance_data.push_back(current_generalization_performance);
            }

            if (reserve_parameters_data)
            {
                results->parameters_data.push_back(mu_parameters);
            }

        }

        time(&current_time);
        elapsed_time = difftime(current_time, beginning_time);

        iterations++;

        // Stopping criteria

        if (ln >= mu){
            end = true;
            if (display)
                std::cout << "Algorithm finished " << std::endl;
            results->stopping_condition = GoldenSectionOrder::AlgorithmFinished;
        }else if (elapsed_time > maximum_time)
        {
            end = true;
            if (display)
                std::cout << "Maximum time reached." << std::endl;
            results->stopping_condition = GoldenSectionOrder::MaximumTime;
        }else if (fmin(ln_performance[1],mu_performance[1]) < selection_performance_goal)
        {
            end = true;
            if (display)
                std::cout << "Generalization performance reached." << std::endl;
            results->stopping_condition = GoldenSectionOrder::SelectionPerformanceGoal;
        }else if (iterations > maximum_iterations_number)
        {
            end = true;
            if (display)
                std::cout << "Maximum number of iterations reached." << std::endl;
            results->stopping_condition = GoldenSectionOrder::MaximumIterations;
        }

        if (display && !end)
        {

            std::cout << "Iteration : " << iterations << std::endl;
            std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
            std::cout << "ln final training performance : " << ln_performance[0] << std::endl;
            std::cout << "ln final generalization performance : " << ln_performance[1] << std::endl;
            std::cout << "mu final training performance : " << mu_performance[0] << std::endl;
            std::cout << "mu final generalization performance : " << mu_performance[1] << std::endl;
            std::cout << "Elapsed time : " << elapsed_time << std::endl;
        }
    }

    // Evaluate each of the four bracket points exactly once and cache the
    // result: calculate_performances trains the network, so repeating it for
    // display and for optimum selection (as the previous revision did) is
    // both expensive and potentially inconsistent.
    const Vector<double> a_final_performance = calculate_performances(a);
    minimums[0] = a_final_performance[1];
    a_parameters = get_parameters_order(a);

    const Vector<double> ln_final_performance = calculate_performances(ln);
    minimums[1] = ln_final_performance[1];
    ln_parameters = get_parameters_order(ln);

    const Vector<double> mu_final_performance = calculate_performances(mu);
    minimums[2] = mu_final_performance[1];
    mu_parameters = get_parameters_order(mu);

    const Vector<double> b_final_performance = calculate_performances(b);
    minimums[3] = b_final_performance[1];
    b_parameters = get_parameters_order(b);

    time(&current_time);
    elapsed_time = difftime(current_time, beginning_time);

    if (display)
    {
        std::cout << "Iteration : " << iterations << std::endl;
        std::cout << "a = " << a << "  ln = " << ln << " mu = " << mu << " b = " << b << std::endl;
        std::cout << "a final training performance : " << a_final_performance[0] << std::endl;
        std::cout << "a final generalization performance : " << a_final_performance[1] << std::endl;
        std::cout << "ln final training performance : " << ln_performance[0] << std::endl;
        std::cout << "ln final generalization performance : " << ln_performance[1] << std::endl;
        std::cout << "mu final training performance : " << mu_performance[0] << std::endl;
        std::cout << "mu final generalization performance : " << mu_performance[1] << std::endl;
        std::cout << "b final training performance : " << b_final_performance[0] << std::endl;
        std::cout << "b final generalization performance : " << b_final_performance[1] << std::endl;
        std::cout << "Elapsed time : " << elapsed_time << std::endl;
    }

    minimum = minimums.calculate_minimum();

    // Pick the bracket point whose generalization performance matches the
    // minimum (within tolerance); ties resolve in the order a, ln, mu, b.
    // The chosen order is written straight into the network.
    if (fabs(minimums[0] - minimum) < tolerance)
    {
        if (display)
            std::cout << "Optimal order : " << a << std::endl;

        multilayer_perceptron_pointer->set(inputs_number, a, outputs_number);
        multilayer_perceptron_pointer->set_parameters(a_parameters);

        if (reserve_minimal_parameters)
            results->minimal_parameters = a_parameters;

        results->optimal_order = a;
        results->final_generalization_performance = minimums[0];
        results->final_performance = a_final_performance[0];

    }else if (fabs(minimums[1] - minimum) < tolerance)
    {
        if (display)
            std::cout << "Optimal order : " << ln << std::endl;

        multilayer_perceptron_pointer->set(inputs_number, ln, outputs_number);
        multilayer_perceptron_pointer->set_parameters(ln_parameters);

        if (reserve_minimal_parameters)
            results->minimal_parameters = ln_parameters;

        results->optimal_order = ln;
        results->final_generalization_performance = minimums[1];
        results->final_performance = ln_final_performance[0];

    }else if(fabs(minimums[2] - minimum) < tolerance)
    {
        if (display)
            std::cout << "Optimal order : " << mu << std::endl;

        multilayer_perceptron_pointer->set(inputs_number, mu, outputs_number);
        multilayer_perceptron_pointer->set_parameters(mu_parameters);

        if (reserve_minimal_parameters)
            results->minimal_parameters = mu_parameters;

        results->optimal_order = mu;
        results->final_generalization_performance = minimums[2];
        results->final_performance = mu_final_performance[0];

    }else
    {
        if (display)
            std::cout << "Optimal order : " << b << std::endl;

        multilayer_perceptron_pointer->set(inputs_number, b, outputs_number);
        multilayer_perceptron_pointer->set_parameters(b_parameters);

        if (reserve_minimal_parameters)
            results->minimal_parameters = b_parameters;

        results->optimal_order = b;
        results->final_generalization_performance = minimums[3];
        results->final_performance = b_final_performance[0];

    }

    results->elapsed_time = elapsed_time;
    results->iterations_number = iterations;

    return(results);
}
// Example #3
// Loads a comma-separated time series from resources/data.txt (first line is
// a header, first column of each row is skipped), rescales every column to
// [0, 1], then trains a predictive recurrent SDR on the series, printing an
// exponential moving average of the squared prediction error.
int main() {
	std::mt19937 generator(time(nullptr));

	std::vector<std::vector<double>> timeSeries;

	std::ifstream fromFile("resources/data.txt");

	if (!fromFile.is_open()) {
		std::cerr << "Could not open data.txt!" << std::endl;

		return 1;
	}

	// The first line is a header: use it only to count the comma-separated columns.
	std::string line;

	std::getline(fromFile, line);

	int numEntries = 1;

	for (std::size_t i = 0; i < line.size(); i++)
		if (line[i] == ',')
			numEntries++;

	const int numSkipEntries = 1;

	const int numEntriesUse = numEntries - numSkipEntries;

	// Per-column extrema, used below to rescale each column to [0, 1].
	std::vector<double> minimums(numEntriesUse, 999999999.0);
	std::vector<double> maximums(numEntriesUse, -999999999.0);

	// Read one row per line. Testing the getline result itself (rather than
	// checking good()/eof() before reading) avoids pushing a spurious
	// all-zero row when the file ends with a newline; empty lines are skipped
	// for the same reason.
	while (std::getline(fromFile, line)) {
		if (line.empty())
			continue;

		std::vector<double> entries(numEntriesUse);

		std::istringstream fromLine(line);

		// Skip the leading (unused) columns.
		for (int i = 0; i < numSkipEntries; i++) {
			std::string entry;

			std::getline(fromLine, entry, ',');
		}

		for (int i = 0; i < numEntriesUse; i++) {
			std::string entry;

			std::getline(fromLine, entry, ',');

			if (entry.empty())
				entries[i] = 0.0; // missing field: treat as zero
			else {
				double value = std::stod(entry);

				maximums[i] = std::max(maximums[i], value);
				minimums[i] = std::min(minimums[i], value);

				entries[i] = value;
			}
		}

		timeSeries.push_back(entries);
	}

	// Rescale every column to [0, 1]; the max(0.0001, ...) guard avoids a
	// division by zero for constant columns.
	for (std::size_t i = 0; i < timeSeries.size(); i++) {
		for (std::size_t j = 0; j < timeSeries[i].size(); j++) {
			timeSeries[i][j] = (timeSeries[i][j] - minimums[j]) / std::max(0.0001, (maximums[j] - minimums[j]));
		}
	}

	/*timeSeries.clear();

	timeSeries.resize(10);
	timeSeries[0] = { 0.0f, 1.0f, 0.0f };
	timeSeries[1] = { 0.0f, 0.0f, 0.0f };
	timeSeries[2] = { 1.0f, 1.0f, 0.0f };
	timeSeries[3] = { 0.0f, 0.0f, 1.0f };
	timeSeries[4] = { 0.0f, 1.0f, 0.0f };
	timeSeries[5] = { 0.0f, 0.0f, 1.0f };
	timeSeries[6] = { 0.0f, 0.0f, 0.0f };
	timeSeries[7] = { 0.0f, 0.0f, 0.0f };
	timeSeries[8] = { 0.0f, 1.0f, 0.0f };
	timeSeries[9] = { 0.0f, 1.0f, 1.0f };*/

	// Three-layer SDR hierarchy, shrinking towards the top.
	std::vector<sdr::IPredictiveRSDR::LayerDesc> layerDescs(3);

	layerDescs[0]._width = 8;
	layerDescs[0]._height = 8;

	layerDescs[1]._width = 6;
	layerDescs[1]._height = 6;

	layerDescs[2]._width = 4;
	layerDescs[2]._height = 4;

	sdr::IPredictiveRSDR prsdr;

	prsdr.createRandom(4, 5, 8, layerDescs, -0.01f, 0.01f, 0.0f, generator);

	float avgError = 1.0f;

	const float avgErrorDecay = 0.01f;

	for (int iter = 0; iter < 1000; iter++) {
		for (std::size_t i = 0; i < timeSeries.size(); i++) {
			float error = 0.0f;

			for (std::size_t j = 0; j < timeSeries[i].size(); j++) {
				// Score the prediction made on the previous step against the
				// current ground truth, then feed the truth in as input.
				const double diff = prsdr.getPrediction(j) - timeSeries[i][j];
				error += static_cast<float>(diff * diff);

				prsdr.setInput(j, timeSeries[i][j]);
			}

			// Exponential moving average of the per-row squared error.
			avgError = (1.0f - avgErrorDecay) * avgError + avgErrorDecay * error;

			prsdr.simStep(generator);

			if (i % 10 == 0) {
				std::cout << "Iteration " << i << ": " << avgError << std::endl;
			}
		}
	}

	return 0;
}