Code example #1
File: optimizer.cpp Project: Elisane/PRAT
FT Scene::optimize_weights_via_gradient_descent(FT timestep, bool update)
{
    std::vector<FT> gradient;
    compute_weight_gradient(gradient, -1.0);
    
    std::vector<FT> weights;
    collect_visible_weights(weights);
    
    if (timestep <= 0.0)
    {
        LSWeights line_search(this, 10, 2.0);
        timestep = line_search.run_bt(weights, gradient);
    } else {
        for (unsigned i = 0; i < weights.size(); ++i)
        {
            FT wi = weights[i];
            FT gi = gradient[i];
            weights[i] = wi + timestep*gi;
        }
        update_weights(weights);
        if (update) update_triangulation();
    }

    compute_weight_gradient(gradient);
    return compute_norm(gradient);
}
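
The `run_bt` call above presumably performs a backtracking line search along the supplied direction, with the constructor arguments `10` and `2.0` suggesting a retry budget and a shrink factor. A minimal sketch of that idea, assuming an objective callback (this is not the project's actual LSWeights code):

// A minimal backtracking line-search sketch: shrink the step size until the
// objective improves along the search direction. All names are assumptions.
#include <functional>
#include <vector>

double backtracking_step(const std::vector<double>& w,
                         const std::vector<double>& dir,
                         const std::function<double(const std::vector<double>&)>& objective,
                         int max_tries = 10, double shrink = 2.0) {
    double t = 1.0;                      // initial step size
    const double f0 = objective(w);      // objective at the current weights
    for (int k = 0; k < max_tries; ++k) {
        std::vector<double> trial(w);
        for (size_t i = 0; i < w.size(); ++i)
            trial[i] += t * dir[i];      // step along the search direction
        if (objective(trial) < f0)
            return t;                    // accept the first improving step
        t /= shrink;                     // otherwise shrink the step and retry
    }
    return 0.0;                          // no improving step was found
}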
Code example #2
int end(int winner, char *reason)
{
/*      char *foo[] = {"1/2-1/2", "1-0", "0-1" }; */

    if (weight_mode)
    {
	update_weights(winner);
	save_weights(weightfile);
    }

/*      if ((winner>=-1)&&(winner<=1)) */
/*      { */
/*  	output("%s {%s}\n", foo[winner+1],reason); */
/*      } */
/*      else */
/*      { */
/*  	output("%s {%s}\n", "ended", reason); */
/*      } */
/*      if (((computer[WHITE]+computer[BLACK])>0)&&book_mode) */
/*      { */
/*  	hardupdatebook(WHITE, bookfile); */
/*      } */
    
    return winner;
}
Code example #3
File: cardr.c Project: dppb05/cardr_c
double run() {
    printf("Initialization.\n");
    init_memb();
    if(verbose) print_memb(&memb);
    init_weights();
    if(verbose) print_weights(&weights);
    beta = 0.0;
    double adeq = adequacy();
    printf("Adequacy: %.15lf\n", adeq);
    double prev_iter_adeq;
    double adeq_diff;
    size_t iter = 1;
    st_matrix prev_memb;
    init_st_matrix(&prev_memb, objc, clustc);
    do {
        printf("Iteration %zu:\n", iter);
        prev_iter_adeq = adeq;
        global_dissim();
        compute_membvec();
        if(compute_dists()) {
            do {
                if(verbose) {
                    printf("Distances:\n");
                    print_st_matrix(&dists, 10, true);
                }
            } while(adjust_dists());
        }
        if(verbose) {
            printf("Distances:\n");
            print_st_matrix(&dists, 10, true);
        }
        mtxcpy(&prev_memb, &memb);
        update_memb();
        if(verbose) print_memb(&memb);
        update_weights();
        if(verbose) print_weights(&weights);
        adeq = adequacy();
        printf("Adequacy: %.15lf\n", adeq);
        adeq_diff = prev_iter_adeq - adeq;
        if(adeq_diff < 0.0) {
            adeq_diff = fabs(adeq_diff);
            printf("Warn: previous iteration adequacy is greater "
                    "than current (%.15lf).\n", adeq_diff);
        }
        if(adeq_diff < epsilon) {
            printf("Adequacy difference threshold reached (%.15lf)."
                    "\n", adeq_diff);
            break;
        }
        if(++iter > max_iter) {
            printf("Maximum number of iterations reached.\n");
            break;
        }
    } while(true);
    free_st_matrix(&prev_memb);
    printf("Beta: %.15lf\n", beta);
    return adeq;
}
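
The loop above is the usual alternating-optimization recipe: update one block of variables at a time, recompute the adequacy, and stop on a small adequacy difference or an iteration cap. The stopping rule, isolated as a sketch (names assumed):

// Sketch of the stopping rule used above (names are assumptions).
#include <cmath>

bool should_stop(double prev_adeq, double adeq, double eps,
                 size_t iter, size_t max_iter) {
    // stop when adequacy stops improving, or when the iteration budget runs out
    return std::fabs(prev_adeq - adeq) < eps || iter > max_iter;
}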
Code example #4
void cmd_new(char *s)
{
    if (gameoverp(tomove()) == IN_PROGRESS)
    {
	if (weight_mode)
	    update_weights(-2);
    }
    
    initialize();
}
Code example #5
void MultilayerPerceptron::train_sync(cv::Mat train_data,
	cv::Mat expected_outputs)
{
	cv::Mat output;
	for(int i = 0; i < train_data.rows; ++i)
	{
		output = feed_forward(train_data.row(i));
		backpropagation(expected_outputs.row(i), output);
		update_weights();
	}
}
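
The `update_weights()` call presumably applies the gradients that `backpropagation` just accumulated. A minimal per-layer SGD sketch under that assumption (the names here are illustrative, not the class's actual API):

// Hypothetical per-layer SGD step: w <- w - eta * dE/dw, element-wise.
#include <opencv2/core.hpp>

void sgd_update(cv::Mat& weights, const cv::Mat& grad, double learning_rate) {
    // both matrices are assumed to have the same size and type
    weights -= learning_rate * grad;
}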
Code example #6
File: model_perceptron.c Project: bakineggs/mrscake
static model_t*perceptron_train(perceptron_model_factory_t*factory, dataset_t*d)
{
    int num_iterations = d->num_rows*100;
    double base_eta = 0.1;
    double lastperf = 1.0;
    double currentperf = -1;
    int t;

    /* bail out before allocating, so nothing leaks on the early returns */
    if(dataset_has_categorical_columns(d))
        return NULL;
    if(d->desired_response->num_classes > 2)
        return NULL;

    double*weights = calloc(d->num_columns, sizeof(double));

    double class_to_level[2] = {-1, 1};
    for(t=1;t<num_iterations;t++)
    {
        int i = lrand48() % d->num_rows;
        double eta = base_eta / t;
        if(predict(d, weights, i) != d->desired_response->entries[i].c) {
            update_weights(weights, d, i, eta);
        }
    }

    expanded_columns_t*expanded_columns = expanded_columns_new(d);
    START_CODE(program)
    BLOCK

        IF
            LT
            ADD
                for(t=0;t<d->num_columns;t++) {
                    MUL
                        INSERT_NODE(expanded_columns_parameter_code(expanded_columns, t))
                        FLOAT_CONSTANT(weights[t])
                    END;
                }
            END;
            FLOAT_CONSTANT(0.0);
            END;
        THEN
            GENERIC_CONSTANT(d->desired_response->classes[0]);
        ELSE
            GENERIC_CONSTANT(d->desired_response->classes[1]);
        END;

    END_CODE;
    expanded_columns_destroy(expanded_columns);
    free(weights);

    model_t*m = model_new(d);
    m->code = program;
    return m;
}
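
The `update_weights(weights, d, i, eta)` call presumably implements the classic perceptron step, w <- w + eta * y * x, with the label mapped to {-1, +1} (cf. `class_to_level`). A hedged sketch, with row access as an assumption:

// Sketch of a perceptron update on one misclassified row (names assumed).
void perceptron_update(double* weights, const double* row, int num_columns,
                       double y, double eta) {
    for (int t = 0; t < num_columns; t++)
        weights[t] += eta * y * row[t];   // nudge w toward the correct side
}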
Code example #7
File: mvfcmv.c Project: RomeroBarata/IN1102_Project
// Main loop for an instance of the algorithm.
double run() {
    size_t i;
    size_t j;
    size_t k;
    printf("Initialization.\n");
    init_medoids();
    if(verbose) print_medoids(medoids);
    for(k = 0; k < clustc; ++k) {
        for(j = 0; j < dmatrixc; ++j) {
            weights[k][j] = 1.0;
        }
    }
    if(verbose) print_weights(weights);
    update_memb();
    if(verbose) print_memb(memb);
    double prev_adeq = 0.0;
    double adeq = adequacy_obj(false);
    printf("Adequacy: %.20lf\n", adeq);
    double diff = fabs(adeq - prev_adeq);
    for(i = 1; i <= max_iter && diff > epsilon; ++i) {
        printf("Iteration %zu.\n", i);
        prev_adeq = adeq;
        adequacy_cluster(false);
        update_medoids();
        adeq = adequacy_cluster(true);
        if(verbose) {
            print_medoids(medoids);
            printf("Adequacy1: %.20lf\n", adeq);
        }
        adequacy_cluster(false);
        update_weights();
        adeq = adequacy_cluster(true);
        if(verbose) {
            print_weights(weights);
            printf("Adequacy2: %.20lf\n", adeq);
        }
        adequacy_obj(false);
        update_memb();
        adeq = adequacy_obj(true);
        if(verbose) print_memb(memb);
        printf("Adequacy: %.20lf\n", adeq);
        if(dgt(adeq, prev_adeq)) {
            printf("Warn: current adequacy is greater than "
                    "previous iteration (%.20lf)\n",
                    adeq - prev_adeq);
        }
        diff = fabs(adeq - prev_adeq);
    }
    if(diff <= epsilon) {
        printf("Adequacy difference threshold reached (%.20lf).\n",
                diff);
    } else {
        printf("Maximum number of iterations reached.\n");
    }
    return adeq;
}
Code example #8
File: backprop.c Project: rdadolf/msms
void backprop(TYPE weights1[input_dimension*nodes_per_layer], 
                TYPE weights2[nodes_per_layer*nodes_per_layer],
                TYPE weights3[nodes_per_layer*possible_outputs],
                TYPE biases1[nodes_per_layer], 
                TYPE biases2[nodes_per_layer],
                TYPE biases3[possible_outputs],
                TYPE training_data[training_sets*input_dimension],
                TYPE training_targets[training_sets*possible_outputs]) {
    int i,j;
    //Forward and training structures
    TYPE activations1[nodes_per_layer];
    TYPE activations2[nodes_per_layer];
    TYPE activations3[possible_outputs];
    TYPE dactivations1[nodes_per_layer];
    TYPE dactivations2[nodes_per_layer];
    TYPE dactivations3[possible_outputs];
    TYPE net_outputs[possible_outputs];
    //Training structure
    TYPE output_difference[possible_outputs];
    TYPE delta_weights1[input_dimension*nodes_per_layer]; 
    TYPE delta_weights2[nodes_per_layer*nodes_per_layer];
    TYPE delta_weights3[nodes_per_layer*possible_outputs];
    TYPE oracle_activations1[nodes_per_layer];
    TYPE oracle_activations2[nodes_per_layer];

    for(i=0; i<training_sets; i++){
        for(j=0;j<nodes_per_layer;j++){
            activations1[j] = (TYPE)0.0;
            activations2[j] = (TYPE)0.0;
            if(j<possible_outputs){
                activations3[j] = (TYPE)0.0;
            }
        }
        matrix_vector_product_with_bias_input_layer(biases1, weights1, activations1, &training_data[i*input_dimension]);
        RELU(activations1, dactivations1, nodes_per_layer);
        matrix_vector_product_with_bias_second_layer(biases2, weights2, activations2, activations1);
        RELU(activations2, dactivations2, nodes_per_layer);
        matrix_vector_product_with_bias_output_layer(biases3, weights3, activations3, activations2);
        RELU(activations3, dactivations3, possible_outputs);
        soft_max(net_outputs, activations3);
        take_difference(net_outputs, &training_targets[i*possible_outputs], output_difference, dactivations3);
        get_delta_matrix_weights3(delta_weights3, output_difference, activations2);
        get_oracle_activations2(weights3, output_difference, oracle_activations2, dactivations2);
        get_delta_matrix_weights2(delta_weights2, oracle_activations2, activations1);
        get_oracle_activations1(weights2, oracle_activations2, oracle_activations1, dactivations1);
        get_delta_matrix_weights1(delta_weights1, oracle_activations1, &training_data[i*input_dimension]);
        update_weights(weights1, weights2, weights3, delta_weights1, delta_weights2, delta_weights3, 
                       biases1, biases2, biases3, oracle_activations1, oracle_activations2, output_difference);
    }
}
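
The final `update_weights` call presumably applies each delta matrix with a fixed learning rate. A hedged sketch of that step for one flat weight array (the learning rate and layout are assumptions):

// Sketch of one SGD application: w <- w - eta * dE/dw over a flat array.
template <typename T>
void apply_deltas(T* weights, const T* deltas, int n, T learning_rate) {
    for (int i = 0; i < n; ++i)
        weights[i] -= learning_rate * deltas[i];
}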
Code example #9
File: perc.cpp Project: ryleyherrington/perceptron
//p_train based on the online perceptron algorithm 
std::vector<float> p_train(XDATA x, YDATA y, int epochs, bool shuffled_flag)
{
    std::vector<float> w;
    w.resize(4); // weight vector of size 4, zero-initialized

    bool updated = true;
    while (updated) { // repeat until a full pass makes no update
        updated = false;
        if (shuffled_flag)
            shuffle(x, y, epochs); // shuffle once per pass if requested
        for (int i = 0; i < epochs; i++) {
            // update the weights on each row; record whether any row changed
            if (update_weights(x[i], y[i], w))
                updated = true;
        }
    }
    return w;
}
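
`update_weights(x[i], y[i], w)` is assumed to return whether the row was misclassified and the weights adjusted, which is what lets the outer convergence loop terminate. A sketch of that contract (the bias-in-w[0] layout is an assumption):

// Sketch of a per-row update that reports whether it changed the weights.
#include <vector>

bool update_weights_row(const std::vector<float>& x, int y, std::vector<float>& w) {
    float activation = w[0];                       // assumed bias term
    for (size_t j = 0; j + 1 < w.size(); ++j)
        activation += w[j + 1] * x[j];
    int predicted = activation >= 0.0f ? 1 : -1;
    if (predicted == y) return false;              // already correct, no change
    w[0] += y;                                     // standard perceptron step
    for (size_t j = 0; j + 1 < w.size(); ++j)
        w[j + 1] += y * x[j];
    return true;
}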
Code example #10
int AdaBoost2::boosting()
{
  int T = m_all_weak_classifiers.size();

  for (int t = 0; t < T; t++)
  {
      calc_all_weighted_error();

      if (m_min_weighted_error >= 0.5 || m_picked_classifiers.size() >= 6)
      {
        if (m_picked_classifiers.empty())
        {
          cerr << "Error in boosting: no weak classifier was picked." << endl;
        }
        // abort
        //cout << "Attr: " << attribute_names[m_attribute] << " Abort boosting at iteration " << t << endl;
        break;
      }

      update_weights();

  }

  cout << "Strong classifier:" << endl;
  for (vector<tr1::shared_ptr<WeakClassifier> >::iterator it = m_picked_classifiers.begin();
      it != m_picked_classifiers.end();
      it++)
  {
    double alpha, error, lambda;
    uint64 feature_type;
    (*it)->alpha(alpha);
    (*it)->weighted_error(error);
    (*it)->feature_type(feature_type);
//    lambda = 1.0 / pow(10, (feature_type >> 16) + 1);
    feature_type &= 0xFFFFUL;

//    cout << alpha << ": " << map_feature_type_name(feature_type) << " lambda-" << lambda << " :" << error << endl;
    cout << alpha << ": " << map_feature_type_name(feature_type) <<  " :" << error << endl;

  }

  return 0;
}
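
The `update_weights()` call presumably performs the standard AdaBoost reweighting: samples the newly picked classifier got wrong gain weight, and the distribution is renormalized. A hedged sketch (the label and prediction vectors are assumptions, not class members):

// Sketch of a discrete AdaBoost reweighting step, labels in {-1, +1}.
#include <cmath>
#include <vector>

void adaboost_reweight(std::vector<double>& sample_weights,
                       const std::vector<int>& labels,
                       const std::vector<int>& predictions,
                       double alpha) {
    double total = 0.0;
    for (size_t i = 0; i < sample_weights.size(); ++i) {
        // exp(+alpha) for mistakes, exp(-alpha) for correct predictions
        sample_weights[i] *= std::exp(-alpha * labels[i] * predictions[i]);
        total += sample_weights[i];
    }
    for (double& w : sample_weights)
        w /= total;   // renormalize so the weights stay a distribution
}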
Code example #11
File: perc.cpp Project: ryleyherrington/perceptron
int main(int argc, char *argv[])
{
    int  num_epochs  = argc>1 ? atoi(argv[1]) : 0;
    bool do_shuffle  = argc>2 ? *argv[2] == 'y' : false;
    FILE *train_file = openfile( argc>3 ? argv[3] : (char*)"perceptron_train.csv");
    FILE *test_file  = openfile( argc>4 ? argv[4] : (char*)"perceptron_test.csv");

    XDATA xtrain, xtest;
    YDATA ytrain, ytest;
    read_data(train_file, xtrain, ytrain);

    if (num_epochs == 0)
        num_epochs = xtrain.size();
	//read in training data and figure out w based on those
    std::vector<float> w = p_train(xtrain, ytrain, num_epochs, do_shuffle);
    fprintf(stdout, "weight vector = %f %f %f %f\n", w[0],w[1],w[2],w[3]);

    int failcount = 0;
	//read in test data
    read_data(test_file,  xtest,  ytest);
    for (size_t i=0; i<xtest.size(); i++){
        // classify each test example with the learned weights
        int y = p_classify(xtest[i], w);
        if (y != ytest[i]) {
            // misclassified: update the weights on this example
            update_weights(xtest[i], ytest[i], w);
            // reclassify with the updated weights
            y = p_classify(xtest[i], w);
            if (y != ytest[i]) {
                // still wrong after the update: count it as a failure
                failcount++;
            }
        }
    }

    fprintf(stdout, "Failures = %d\nEpochs = %d\n\n", failcount, num_epochs);
    fprintf(stdout, "Failures in training: %d\n", test_fails);
    return 0;
}
Code example #12
File: router.cpp Project: fritzo/pomagma
void Router::fit_language(
    const std::unordered_map<std::string, size_t>& symbol_counts,
    const std::unordered_map<Ob, size_t>& ob_counts, float reltol) {
    POMAGMA_INFO("Fitting language");
    const size_t item_count = m_carrier.item_count();
    std::vector<float> ob_probs(1 + item_count, 0);
    std::vector<float> ob_weights(1 + item_count, 0);
    std::vector<float> symbol_weights(m_types.size(), 0);
    POMAGMA_ASSERT_EQ(m_types.size(), m_language.size());
    const float max_increase = 1.0 + reltol;

    bool changed = true;
    while (changed) {
        changed = false;

        update_probs(ob_probs, reltol);

        update_weights(ob_probs, symbol_counts, ob_counts, symbol_weights,
                       ob_weights, reltol);

        POMAGMA_DEBUG("optimizing language");
        float total_weight = 0;
        for (float weight : symbol_weights) {
            total_weight += weight;
        }
        for (size_t i = 0; i < m_types.size(); ++i) {
            SegmentType& type = m_types[i];
            float new_prob = symbol_weights[i] / total_weight;
            float old_prob = type.prob;
            type.prob = new_prob;
            m_language[type.name] = new_prob;

            if (new_prob > old_prob * max_increase) {
                changed = true;
            }
        }
    }
}
Code example #13
File: rbm.cpp Project: hakimsd9/RBM
// train model
void rbm::train(float ** weights, float * visible_bias, float * hidden_bias){
    std::cout << "Training model..." << std::endl;

    // weights, visible_bias and hidden_bias were randomly initialized by the caller
    // Allocate delta_weights, delta_hidden_bias and delta_visible_bias; cd() zeroes them on each call
    float ** delta_weights = new float*[num_hidden_units];
    for (int i=0; i<num_hidden_units; i++){
        delta_weights[i] = new float[num_visible_units];
    }
    float * delta_hidden_bias = new float[num_hidden_units];
    float * delta_visible_bias = new float[num_visible_units];

    float ** features = new float*[number_of_data_points];
    for (int i=0; i<number_of_data_points; i++){
        features[i] = new float[num_visible_units];
    }

    for (int i=0; i<number_of_data_points; i++){
        for (int j=0; j<num_visible_units; j++){
            features[i][j] = input_features[i][j];
        }
    }

    // random indexes of the data points that will be chosen at each iteration of sga
    int * idxes_batch = new int[size_minibatch];

    int inner_iter = number_of_data_points/num_epochs;
    // Perform K-step CD num_epochs times
    for (int iter=0; iter<num_epochs; iter++){
        for (int i=0; i<inner_iter; i++){
            // sample a minibatch and perform CD on it
            rand_init_vec_int(idxes_batch, size_minibatch, number_of_data_points);

            // cd() resets the deltas to zero on every call
            cd(features, weights, hidden_bias, visible_bias, delta_weights, delta_hidden_bias,
                    delta_visible_bias, K, idxes_batch, size_minibatch);

            // update parameters
            update_weights(weights, delta_weights, learning_rate, num_hidden_units, num_visible_units, size_minibatch);
            update_visible_bias(visible_bias, delta_visible_bias, learning_rate, num_visible_units, size_minibatch);
            update_hidden_bias(hidden_bias, delta_hidden_bias, learning_rate, num_hidden_units, size_minibatch);
        }
    }

    // release data
    delete[] idxes_batch;
    delete[] delta_hidden_bias;
    delete[] delta_visible_bias;

    for (int i=0; i<num_hidden_units; i++){
        delete[] delta_weights[i];
    }
    delete[] delta_weights;

    for (int i=0; i<number_of_data_points; i++){
        delete[] features[i];
    }
    delete[] features;
//    std::cout << "Training model: DONE" << std::endl;
}
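
Given the call signature above, `update_weights` presumably averages the accumulated CD statistics over the minibatch and takes a gradient ascent step. A minimal sketch under that assumption:

// Sketch of the CD weight update assumed above: delta_weights holds
// (data statistics - model statistics) summed over the minibatch.
void cd_weight_update(float** weights, float** delta_weights,
                      float learning_rate, int num_hidden, int num_visible,
                      int batch_size) {
    for (int i = 0; i < num_hidden; ++i)
        for (int j = 0; j < num_visible; ++j)
            weights[i][j] += learning_rate * delta_weights[i][j] / batch_size;
}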
Code example #14
File: main.cpp Project: zbxzc35/my_adaboost
StrongClassifier* adaboost_learning(CascadeClassifier *cc, std::list<float *> &positiveSet, int numPos,
        std::list<float *> &negativeSet, int numNeg, std::list<float *> &validateSet, std::vector<Feature *> &featureSet,
            float maxfpr, float maxfnr)
{
    StrongClassifier *sc = new StrongClassifier;

    int width = cc->WIDTH;
    int height = cc->HEIGHT;

    float *weights = NULL, *values = NULL;

    int sampleSize = numPos + numNeg;
    int fsize = featureSet.size();

    float cfpr = 1.0;

    init_weights(&weights, numPos, numNeg);

    values = new float[sampleSize];
    memset(values, 0, sizeof(float) * sampleSize);

    while(cfpr > maxfpr)
    {
        std::list<float *>::iterator iter;
        float minError = 1, error, beta;
        WeakClassifier *bestWC = NULL;

        for(int i = 0; i < fsize; i++)
        {
            Feature *feat = new Feature;
            WeakClassifier *wc = new WeakClassifier;

            init_feature(feat, featureSet[i]);
            init_weak_classifier(wc, 0, 0, feat);

            iter = positiveSet.begin();
            for(int j = 0; j < numPos; j++, iter++)
                values[j] = get_value(feat, *iter, width, 0, 0);

            iter = negativeSet.begin();
            for(int j = 0; j < numNeg; j++, iter++)
                values[j + numPos] = get_value(feat, *iter, width, 0, 0);

            error = train(wc, values, numPos, numNeg, weights);

            if(error < minError)
            {
                if(bestWC != NULL){
                    clear(bestWC);
                    bestWC = NULL;
                }

                bestWC = wc;

                minError = error;

                printf("Select best weak classifier, min error: %f\r", minError);
                fflush(stdout);
            }

            else
                delete wc;
        }

        assert(minError > 0);

        printf("best weak classifier error = %f                      \n", minError);

        beta = minError / (1 - minError);

        int tp = 0;
        iter = positiveSet.begin();

        for(int i = 0; i < numPos; i++, iter++){
            if(classify(bestWC, *iter, width, 0, 0) == 1){
                weights[i] *= beta;
                tp ++;
            }
        }

        int tn = 0;
        iter = negativeSet.begin();

        for(int i = numPos; i < sampleSize; i++, iter++){
            if(classify(bestWC, *iter, width, 0, 0) != 1){
                weights[i] *= beta;
                tn++;
            }
        }

        update_weights(weights, numPos, numNeg);

        printf("TP = %d, TN = %d, beta = %f, log(1/beta) = %f\n", tp, tn, beta, log(1/beta));

        add(sc, bestWC, log(1/beta));

        train(sc, positiveSet, width, maxfnr);

        cfpr = fpr(sc, validateSet, width);

        printf("fpr validate: %f\n", fpr(sc, validateSet, width));
        printf("fpr negative: %f\n", fpr(sc, negativeSet, width));

        printf("\n");
    }

    printf("\nWeak classifier size %zu\n", sc->wcs.size());

    delete [] values;
    delete [] weights;

    return sc;
}
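
In this Viola-Jones style loop the correctly classified samples have already been scaled by beta, so `update_weights(weights, numPos, numNeg)` presumably just renormalizes the distribution. A hedged sketch:

// Sketch of the renormalization assumed above: rescale so the weights sum to 1.
void renormalize_weights(float* weights, int numPos, int numNeg) {
    int n = numPos + numNeg;
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) sum += weights[i];
    for (int i = 0; i < n; ++i) weights[i] /= sum;
}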