Example #1
func_t *func_poly_monic(func_t *f)
{
  func_t *a=NULL;
  if(!(func_is_poly(f))){ FUNC_ERROR_ARG1("func_poly_monic",f); }
  a=func_poly_get_lc(f);                            /* leading coefficient */
  if(a==NULL || func_is_one(a)){ return f; }        /* already monic */
  return func_expand(func_mul(func_inv(FR(a)),f));  /* f * (1/lc), expanded */
}
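For readers unfamiliar with the func_t machinery (a symbolic-algebra type), a minimal sketch of the same monic-normalization idea over a plain coefficient vector might look as follows; the poly_monic name and the double representation are illustrative assumptions, not part of the library above.

#include <vector>

// Illustrative sketch only: poly[i] holds the coefficient of x^i.
// Mirrors func_poly_monic: divide every coefficient by the leading one.
void poly_monic(std::vector<double>& poly)
{
    if(poly.empty()) return;          // no leading coefficient (cf. a==NULL)
    double lc = poly.back();          // leading coefficient (func_poly_get_lc)
    if(lc == 1.0) return;             // already monic (func_is_one)
    for(double& c : poly) c /= lc;    // multiply by 1/lc (func_inv + func_mul)
}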
Example #2
File: neuron.cpp  Project: Zinurist/CNN
void Neuron::reverse_activate()
{
    value = func_inv(value);  // recover the pre-activation z from the stored a = func(z)
}
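reverse_activate undoes the activation function: the neuron stores a = func(z), and func_inv recovers the pre-activation z. Which function this actually is depends on the project; assuming the logistic sigmoid (an assumption, not confirmed by the snippet), a matching pair could be:

#include <cmath>

// Hypothetical activation pair, assuming the logistic sigmoid:
// func(z) = 1/(1 + e^-z), so func_inv(a) = ln(a/(1 - a)) recovers z.
double func(double z)     { return 1.0/(1.0 + std::exp(-z)); }
double func_inv(double a) { return std::log(a/(1.0 - a)); }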
Example #3
File: training.cpp  Project: Zinurist/CNN
void back_propagate(NeuralNetwork& nn, const values_t& input, const values_t& expected_output, std::vector<values_matrix_t>& delta_W, values_matrix_t& delta_b)
{
    //algorithm from http://ufldl.stanford.edu/tutorial/supervised/MultiLayerNeuralNetworks/
    //Neuron(W) -> z -> func() -> a
    //input -> NN -> output/y

    //step 1
    values_t output;
    nn.process(input, output);

    values_t prev;  //error of the layer above; a plain copy, so 'output' is not reused as scratch space
    TYPE tmp;
    int lyr;
    int num_lyrs, num_neurons, num_connections;
    values_matrix_t error;

    error.resize(nn.net.size());

    //step 2
    //output layer first
    lyr = error.size()-1;
    num_neurons = nn.net[lyr].size();
    error[lyr].resize(num_neurons);
    for(int n=0; n<num_neurons; n++){
        error[lyr][n] = func_deriv( func_inv(output[n]) ); //output stores a = f(z), so this recovers f'(z)
        error[lyr][n] *= (output[n] - expected_output[n]); //times -(y-a) = (a-y)
    }
    prev = error[lyr];

    //step 3
    //walk back from the layer just below the output down to layer 0 (the first layer after the input)
    for(lyr--; lyr >= 0; lyr--){
        num_neurons = nn.net[lyr].size();
        error[lyr].resize(num_neurons);

        for(int n=0; n<num_neurons; n++){
            error[lyr][n] = func_deriv( func_inv( nn.net[lyr][n].value ) ); //value stores a = f(z), so this recovers f'(z)
            tmp = 0;
            for(int j=0; j<(int)prev.size(); j++){ //accumulate W^T * error[lyr+1] = W^T * prev
                tmp += prev[j] * nn.net[lyr+1][j][n];
            }
            error[lyr][n] *= tmp;
        }

        prev = error[lyr];
    }


    //step 4
    //add to delta W, delta b
    num_lyrs = error.size();
    for(lyr=0; lyr<num_lyrs; lyr++){
        num_neurons = error[lyr].size();

        for(int neu=0; neu<num_neurons; neu++){
            num_connections = nn.net[lyr][neu].size();

            delta_b[lyr][neu] += error[lyr][neu];
            for(int con=0; con<num_connections; con++){
                //nn.net[lyr-1] -> lyr before current layer
                //nn.net[lyr-1][con] -> specific neuron before current layer (num_neurons of [lyr-1] = num_connections of [lyr])
                if(lyr == 0)
                    delta_W[lyr][neu][con] += error[lyr][neu]*input[con];
                else
                    delta_W[lyr][neu][con] += error[lyr][neu]*nn.net[lyr-1][con].value;
            }
        }
    }

}
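The pattern func_deriv(func_inv(...)) in steps 2 and 3 exists because the network stores activations a = f(z) rather than the raw pre-activations, so the code inverts the activation first and then evaluates f'. A small self-contained check of that round trip, reusing the hypothetical sigmoid pair sketched under Example #2:

#include <cmath>
#include <cstdio>

double func(double z)       { return 1.0/(1.0 + std::exp(-z)); }
double func_inv(double a)   { return std::log(a/(1.0 - a)); }
double func_deriv(double z) { double a = func(z); return a*(1.0 - a); }

int main()
{
    // For the sigmoid, f'(z) = a*(1 - a), so inverting first and then
    // differentiating matches the direct formula on the stored activation.
    double a = 0.73;
    std::printf("%f vs %f\n", func_deriv(func_inv(a)), a*(1.0 - a)); // both ~0.197100
    return 0;
}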