Example 1: per-sample (stochastic) back-propagation in C++
void back_propagation_instant(NeuralNetwork& nn, train_set& set)
{
    //algorithm from http://ufldl.stanford.edu/tutorial/supervised/MultiLayerNeuralNetworks/

    set.progress = 0;
    size_t num_lyrs, num_neurons, num_connections;

    std::vector<values_matrix_t> delta_W;
    values_matrix_t delta_b;

    num_lyrs = nn.net.size();
    delta_W.resize(num_lyrs);
    delta_b.resize(num_lyrs);
    //size delta_W and delta_b to match the network topology
    for(size_t lyr=0; lyr<num_lyrs; lyr++){
        num_neurons = nn.net[lyr].size();
        delta_W[lyr].resize(num_neurons);
        delta_b[lyr].resize(num_neurons);
        for(size_t neu=0; neu<num_neurons; neu++){
            num_connections = nn.net[lyr][neu].size();
            delta_W[lyr][neu].resize(num_connections);
        }
    }

    for(; set.progress < set.iterations; set.progress++){
        //per-sample (stochastic) variant: the gradients are reset and the
        //parameters updated after every individual training example

        for(size_t m=0; m<set.input.size(); m++){
            //reset the accumulated gradients for this sample
            for(size_t lyr=0; lyr<num_lyrs; lyr++){
                num_neurons = nn.net[lyr].size();

                for(size_t neu=0; neu<num_neurons; neu++){
                    num_connections = nn.net[lyr][neu].size();
                    delta_b[lyr][neu] = 0.0;

                    for(size_t con=0; con<num_connections; con++){
                        delta_W[lyr][neu][con] = 0.0;
                    }
                }
            }

            back_propagate(nn, set.input[m], set.output[m], delta_W, delta_b);

            //gradient step with L2 weight decay:
            //W -= alpha*(dW + lambda*W),  b -= alpha*db
            for(size_t lyr=0; lyr<num_lyrs; lyr++){
                num_neurons = nn.net[lyr].size();

                for(size_t neu=0; neu<num_neurons; neu++){
                    num_connections = nn.net[lyr][neu].size();
                    nn.net[lyr][neu].bias -= set.learn_rate * delta_b[lyr][neu];

                    for(size_t con=0; con<num_connections; con++){
                        nn.net[lyr][neu][con] -= set.learn_rate * (delta_W[lyr][neu][con] + set.weight_decay*nn.net[lyr][neu][con]);
                    }
                }
            }
        }

    }
}
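
Neither C++ example defines the types it manipulates. Below is a minimal sketch of what they might look like, inferred purely from the accesses above (nn.net[lyr][neu][con], nn.net[lyr][neu].bias, set.learn_rate, and so on); the real project's definitions, and the signature of back_propagate, may well differ.

#include <cstddef>
#include <vector>

//hypothetical reconstruction -- names inferred from the snippets above
typedef std::vector<std::vector<double>> values_matrix_t; //e.g. delta_b[layer][neuron]

struct Neuron {
    std::vector<double> weights; //one weight per incoming connection
    double bias = 0.0;

    std::size_t size() const { return weights.size(); }
    double& operator[](std::size_t con) { return weights[con]; }
};

struct NeuralNetwork {
    std::vector<std::vector<Neuron>> net; //net[layer][neuron]
};

struct train_set {
    std::vector<std::vector<double>> input;  //one vector per training sample
    std::vector<std::vector<double>> output; //matching target vectors
    double learn_rate        = 0.01;
    double weight_decay      = 0.0;  //L2 regularization coefficient
    std::size_t iterations   = 1000; //training epochs
    std::size_t progress     = 0;    //updated by the training loops
};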
Example 2: heuristic PBQP solver in C
void solve_pbqp_heuristical(pbqp_t *pbqp)
{
#ifndef NDEBUG
	assert(pbqp);
	assert(pbqp->solution == INF_COSTS && "PBQP already solved");
	pbqp->solution = 0;
#endif

	/* Reduce node degrees ... */
	initial_simplify_edges(pbqp);

	/* ... and put each node into the bucket that represents its degree. */
	fill_node_buckets(pbqp);

#if KAPS_STATISTIC
	FILE *fh = fopen("solutions.pb", "a");
	if (fh) {
		fprintf(fh, "Solution");
		fclose(fh);
	}
#endif

	apply_heuristic_reductions(pbqp);

	pbqp->solution = determine_solution(pbqp);

#if KAPS_STATISTIC
	fh = fopen("solutions.pb", "a");
	if (fh) {
	#if KAPS_USE_UNSIGNED
		fprintf(fh, ": %u RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
		        pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
		        pbqp->num_rm, pbqp->num_rn);
	#else
		fprintf(fh, ": %lld RE:%u R0:%u R1:%u R2:%u RM:%u RN/BF:%u\n", pbqp->solution,
		        pbqp->num_edges, pbqp->num_r0, pbqp->num_r1, pbqp->num_r2,
		        pbqp->num_rm, pbqp->num_rn);
	#endif
		fclose(fh);
	}
#endif

	/* Solve reduced nodes. */
	back_propagate(pbqp);

	free_buckets();
}
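
The bucket comment refers to the usual PBQP heuristic: after simplification, nodes are grouped by degree so the exact reductions (R0 for degree 0, R1 for degree 1, R2 for degree 2) can be applied cheaply before falling back to the heuristic RN rule, i.e. the counters the statistics block prints. A minimal sketch of that bucketing idea, using invented types rather than the library's own API:

#include <cstddef>
#include <vector>

//hypothetical node type -- not the real pbqp_node_t
struct Node {
    std::size_t degree; //number of incident edges after simplification
};

//place each node into the bucket matching its current degree; all
//degrees >= 3 share the last bucket (the R0/R1/R2/RN split)
std::vector<std::vector<Node*>> fill_buckets_sketch(std::vector<Node>& nodes)
{
    std::vector<std::vector<Node*>> buckets(4); //degree 0, 1, 2, >=3
    for (Node& n : nodes) {
        std::size_t idx = n.degree < 3 ? n.degree : 3;
        buckets[idx].push_back(&n);
    }
    return buckets;
}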
Example 3: mini-batch back-propagation in C++
void back_propagation(NeuralNetwork& nn, train_set& set, int batch_size, int offset)
{
    //algorithm from http://ufldl.stanford.edu/tutorial/supervised/MultiLayerNeuralNetworks/

    if(batch_size == -1) //batch_size == -1 requests full-batch training
        batch_size = set.input.size();

    set.progress = 0;
    size_t num_lyrs, num_neurons, num_connections;
    int m;

    std::vector<values_matrix_t> delta_W;
    values_matrix_t delta_b;

    num_lyrs = nn.net.size();
    delta_W.resize(num_lyrs);
    delta_b.resize(num_lyrs);
    //size delta_W and delta_b to match the network topology
    for(size_t lyr=0; lyr<num_lyrs; lyr++){
        num_neurons = nn.net[lyr].size();
        delta_W[lyr].resize(num_neurons);
        delta_b[lyr].resize(num_neurons);
        for(size_t neu=0; neu<num_neurons; neu++){
            num_connections = nn.net[lyr][neu].size();
            delta_W[lyr][neu].resize(num_connections);
        }
    }

    for(; set.progress < set.iterations; set.progress++){
        //batch variant: accumulate gradients over a whole mini-batch,
        //then apply a single averaged update

        //reset delta_W and delta_b
        for(size_t lyr=0; lyr<num_lyrs; lyr++){
            num_neurons = nn.net[lyr].size();

            for(size_t neu=0; neu<num_neurons; neu++){
                num_connections = nn.net[lyr][neu].size();
                delta_b[lyr][neu] = 0.0;

                for(size_t con=0; con<num_connections; con++){
                    delta_W[lyr][neu][con] = 0.0;
                }
            }
        }


        //training calculations: wrap the offset around when the next
        //batch would run past the end of the training data
        if(offset + batch_size > (int)set.input.size()){
            offset = 0;
        }

        for(m=0; m<batch_size; m++){
            back_propagate(nn, set.input[offset+m], set.output[offset+m], delta_W, delta_b);
        }
        offset += batch_size;

        //update parameters: W -= alpha*(dW/batch_size + lambda*W),
        //b -= alpha*db/batch_size (gradients were summed over the
        //batch, so divide by batch_size to average)
        for(size_t lyr=0; lyr<num_lyrs; lyr++){
            num_neurons = nn.net[lyr].size();

            for(size_t neu=0; neu<num_neurons; neu++){
                num_connections = nn.net[lyr][neu].size();
                nn.net[lyr][neu].bias -= set.learn_rate * delta_b[lyr][neu]/batch_size;

                for(size_t con=0; con<num_connections; con++){
                    nn.net[lyr][neu][con] -= set.learn_rate * (delta_W[lyr][neu][con]/batch_size + set.weight_decay*nn.net[lyr][neu][con]);
                }
            }
        }

    }
}
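
For context, a hypothetical call site using the types sketched after Example 1 (make_network is invented for illustration, not part of the snippets). With batch_size == -1 the function degenerates to full-batch gradient descent; the update applied above is the W = W - alpha*((1/m)*dW + lambda*W) rule from the UFLDL notes linked in the source.

#include <cstddef>
#include <vector>

//hypothetical helper: builds a network with the given layer widths
NeuralNetwork make_network(const std::vector<std::size_t>& layers);

int main()
{
    NeuralNetwork nn = make_network({2, 2, 1}); //2 inputs, 2 hidden, 1 output

    train_set xor_set;
    xor_set.input  = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    xor_set.output = {{0}, {1}, {1}, {0}};
    xor_set.learn_rate = 0.5;
    xor_set.iterations = 10000;

    back_propagation(nn, xor_set, 2, 0);  //mini-batches of two samples
    back_propagation(nn, xor_set, -1, 0); //batch_size == -1: full batch
}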