Example 1
void DNN::backPropSet(const dvec& input,const dvec& output)
{

    unsigned int i;

    feedForward(input);

    unsigned int L = activations.size() - 1;

    /*
     * One error (delta) term per layer
     */
    std::vector<Matrix> d(L+1);

    /*
     * Copy the target values into a column matrix
     */
    Matrix out(output.size(),1);

    for(i=0;i<output.size();++i) out(i,0) = output.at(i);

    /*
     * Final layer error: cost gradient dC/dA, Hadamard-multiplied by
     * the output activations. NOTE: for sigmoid units the usual
     * Hadamard factor is the derivative a*(1 - a) rather than a itself.
     */
    Matrix DC = Matrix::apply(quad_dC_dA,activations.at(L),out);
    d.at(L)   = Matrix::had(DC,activations.at(L));

    /*
     * Backpropagate
     */
    for(i=L;i>0;--i)
    {

        Matrix wd = weights.at(i-1).T() * d.at(i);
        d.at(i-1)    = Matrix::had( wd, activations.at(i-1) );

    }

    /*
     * Accumulate the bias and weight gradients for this training example
     */
    for(i=L;i>0;--i)
    {

        bGradient.at(i-1) = bGradient.at(i-1) + d.at(i);

        Matrix wg = d.at(i) * activations.at(i-1).T();
        wGradient.at(i-1) = wGradient.at(i-1) + wg;

    }

}
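For reference, a sketch of the standard backpropagation recursion this routine follows, with indexing as in the code (weights.at(l) maps layer l to layer l+1; for sigmoid units the Hadamard factor is sigma'(z) = a*(1 - a), as noted in the comments above):

\[
\delta^{L} = \frac{\partial C}{\partial a^{L}} \odot \sigma'(z^{L}),
\qquad
\delta^{l} = \left( (W^{l})^{T} \delta^{l+1} \right) \odot \sigma'(z^{l}),
\]
\[
\frac{\partial C}{\partial b^{l}} = \delta^{l+1},
\qquad
\frac{\partial C}{\partial W^{l}} = \delta^{l+1} \left( a^{l} \right)^{T}.
\]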
Example 2
int gauss(dvec& x, dvec& w) {

    int n = x.size();
    double dist = 1;
    double tol = 1e-15;
    int iter = 0;

    // Initial guess for the nodes (Chebyshev-Gauss nodes are an alternative)
    initguess(x);
//    chebnodes(x);

    dvec x0(x);
    dvec L(n,0.0);
    dvec Lp(n,0.0);
    dvec a(n,0.0);
    dvec b(n,0.0);

    rec_legendre(a,b);

    // Iteratively correct nodes with Newton's method
    while(dist>tol) {     
        newton_step(a,b,x,L,Lp);
        dist = dist_max(x,x0); 
        ++iter;
        x0 = x;
    } 

    // Compute the Gauss-Legendre weights: w_i = 2 / ((1 - x_i^2) * L'_n(x_i)^2)
    for(int i=0;i<n;++i){
        w[i] = 2.0/((1.0-x[i]*x[i])*(Lp[i]*Lp[i]));
    }

    return iter;
}
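A minimal usage sketch (an assumption, not part of the original source): with dvec taken to be std::vector<double> and the helpers above compiled in, the returned nodes and weights approximate an integral over [-1, 1]:

#include <cmath>
#include <iostream>
#include <vector>

typedef std::vector<double> dvec;   // assumed alias
int gauss(dvec& x, dvec& w);        // defined above

int main() {
    int n = 16;
    dvec x(n, 0.0), w(n, 0.0);
    gauss(x, w);

    // Approximate the integral of exp(x) over [-1, 1]
    double sum = 0.0;
    for (int i = 0; i < n; ++i) sum += w[i] * std::exp(x[i]);

    std::cout << "quadrature: " << sum << "\n"
              << "exact:      " << std::exp(1.0) - std::exp(-1.0) << std::endl;
    return 0;
}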
Example 3
// Update grid points as well as the nth Legendre polynomial and its derivative  
void newton_step(const dvec& a, const dvec& b, dvec& x, dvec& L, dvec& Lp) {
    int n = x.size();
    legendre(a,b,x,L,Lp);
    for(int i=0;i<n;++i) {
        x[i] -= L[i]/Lp[i];
    } 
}
Example 4
// Compute Legendre polynomial recursion coefficients
void rec_legendre(dvec& a, dvec& b){
    int n = a.size();
    for(int i=0;i<n;++i) {
        a[i] = (2.0*i+1.0)/(i+1.0);
        b[i] = double(i)/(i+1.0);
    }
}
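These coefficients implement Bonnet's recursion for the Legendre polynomials,

\[
(n+1)\,P_{n+1}(x) = (2n+1)\,x\,P_n(x) - n\,P_{n-1}(x),
\]

rearranged as \(P_{n+1}(x) = a_n x P_n(x) - b_n P_{n-1}(x)\) with \(a_n = (2n+1)/(n+1)\) and \(b_n = n/(n+1)\), which is the form consumed by legendre() below.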
Example 5
// Compute Chebyshev-Gauss nodes: x_i = cos(pi*(2i+1)/(2n))
// (assumes `pi` is defined elsewhere, e.g. const double pi = std::acos(-1.0))
void chebnodes(dvec& x) {
    int n = x.size();

    for(int i=0;i<n;++i) {
        x[i] = cos(pi*double(2*i+1)/double(2*n));
    }
}
Example 6
void add_mtime(reclist& thisi, dvec& b, dvec& c, bool debug) {

    if(b.empty() && c.empty()) return;

    double maxtime = thisi.back()->time();
    double mintime = thisi.at(0)->time();

    // Merge b and c into a single sorted vector of unique times
    b.insert(b.end(), c.begin(), c.end());
    std::sort(b.begin(), b.end());
    b.erase(std::unique(b.begin(), b.end()), b.end());

    std::size_t i = 0;

    bool dropmin = true;
    bool dropmax = true;

    // add mtimes from argument
    for(i=0; i < b.size(); ++i) {

        if(b.at(i) <= mintime) {
            if(debug && dropmin) {
                Rcpp::Rcout << "dropping mtimes <= min observation time" << std::endl;
                dropmin = false;
            }
            continue;
        }

        if(b.at(i) >= maxtime)  {
            if(debug && dropmax) {
                Rcpp::Rcout << "dropping mtimes >= max observation time" << std::endl;
                dropmax = false;
            }
            break;
        }

        // Add a record at time b[i] that is neither an output
        // record nor taken from the data set
        rec_ptr obs = boost::make_shared<datarecord>(100,b[i],0,-100,0);
        obs->output(false);
        obs->from_data(false);
        thisi.push_back(obs);
    }

    std::sort(thisi.begin(), thisi.end(), CompByTimePosRec);
}
Example 7
// Initial guess for the Legendre roots: x_i = -cos(pi*(i + 0.75)/(n + 0.5))
// (assumes `pi` is defined elsewhere)
void initguess(dvec& x) {
    int n = x.size();

    for(int i=0;i<n;++i) {
        x[i] = -cos(pi*(i+0.75)/(n+0.5));
    }
}
Example 8
double simple_energy( const dvec &q , const dvec &p )
{
    using boost::math::pow;
    const size_t N=q.size();
    double e(0.0);
    for( size_t n=0 ; n<N ; ++n )
    {
        e += 0.5*pow<2>( p[n] )     // kinetic term
            + pow<4>( q[n] )/4.0;   // quartic on-site potential
    }
    return e;
}
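Summed over the per-index terms, this is the energy of N uncoupled quartic oscillators:

\[
E = \sum_{n=0}^{N-1} \left( \tfrac{1}{2} p_n^2 + \tfrac{1}{4} q_n^4 \right).
\]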
Example 9
// Compute maximum pointwise difference
double dist_max(const dvec& a, const dvec& b){
    int n = a.size();
    double dist;
    double max_dist;

    for(int i=0; i<n; ++i) {
        dist = std::abs(a[i]-b[i]);
        if(dist>max_dist) {
            max_dist = dist; 
        }  
    }
    return max_dist;
}
Example 10
 dvec operator()( const dvec &x , dvec dxdt )
 {
     const size_t N = x.size();
     //hpx::cout << boost::format("rhs start %d , %d \n") % x.size() % dxdt.size() << hpx::flush;
     dxdt[0] = coupling( x[1]-x[0] );             // free end on the left
     for( size_t i=1 ; i<N-1 ; ++i )
     {
         dxdt[i] = coupling( x[i+1]-x[i] ) + coupling( x[i-1]-x[i] );
     }
     dxdt[N-1] = coupling( x[N-2] - x[N-1] );     // free end on the right
     //hpx::cout << "rhs end\n" << hpx::flush;
     return dxdt;
 }
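Written out, this right-hand side is a nonlinearly coupled chain with free ends, where f denotes the coupling function:

\[
\dot{x}_0 = f(x_1 - x_0), \qquad
\dot{x}_i = f(x_{i+1} - x_i) + f(x_{i-1} - x_i) \quad (1 \le i \le N-2), \qquad
\dot{x}_{N-1} = f(x_{N-2} - x_{N-1}).
\]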
Example 11
void ConvNet::learn(dmatrix3 &stimulus, dvec &target)
{
    dvec result = feedforward(stimulus);

    // Output-layer deltas: sigmoid derivative at the network output
    // times the prediction error (delta rule)
    real er = 0;
    for(size_t x=0;x<target.size();x++) {
        L4.Errors[x] = sigmoid_p(result[x]) * (target[x] - result[x]);
        er += L4.Errors[x];
    }

    std::cout << "Output error: " << er << std::endl;

    // Propagate the errors backward through the layers
    L3.Errors = L4.backpropagation();
    dvec l2er = L3.backpropagation();
    L2.Errors = fold3(l2er, L2.OutShape);
    L1.Errors = L2.backpropagation();

    L4.weight_update(L3.Activations);
    L3.weight_update(flatten(L2.Activations));
    // L2 is a pooling layer, which has no weights
    L1.weight_update(Inputs); // Warning: Inputs is a pointer!
}
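The output-layer update above is the usual delta rule, with the derivative evaluated at the network output (sigmoid_p is assumed to take the activation value, i.e. sigmoid_p(a) = a*(1 - a)):

\[
\delta_x = \sigma'(\mathrm{out}_x)\,(t_x - \mathrm{out}_x),
\]

where t is the target vector and out the feedforward result.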
Example 12
void DNN::feedForward(const dvec & inputs)
{

    unsigned int layers = activations.size();
    unsigned int i;

    // Layer 0: squash the raw inputs through the sigmoid
    for(i=0;i<inputs.size();++i)
    {
        activations.at(0)(i,0) = sigmoid(inputs.at(i));
    }

    // Hidden and output layers: a(l) = sigmoid(W(l-1) * a(l-1) + b(l-1))
    for(i=1;i<layers;++i)
    {
        activations.at(i) = (weights.at(i-1)*activations.at(i-1) + bias.at(i-1)).apply(sigmoid);
    }

}
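In equation form, layer 0 squashes the raw inputs and every later layer applies an affine map followed by the sigmoid:

\[
a^{0} = \sigma(\mathrm{input}), \qquad
a^{l} = \sigma\!\left( W^{l-1} a^{l-1} + b^{l-1} \right), \quad l = 1, \dots, L.
\]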
Example 13
// Evaluate the nth order Legendre polynomial and its derivative
void legendre(const dvec& a, const dvec& b, const dvec& x, dvec& L, dvec& Lp) {
    int n = x.size();
    dvec L0(n,1.0);   // holds P_{k-1}; starts at P_0(x) = 1
    dvec L1(n,0.0);   // holds P_k;     starts at P_1(x) = x
    
    // Iterate over grid points
    for(int j=0;j<n;++j) {
        L1[j] = x[j];
        // Three-term recurrence: P_{k+1} = a_k x P_k - b_k P_{k-1}
        for(int k=1;k<n;++k) {
            L[j]  = a[k]*x[j]*L1[j]-b[k]*L0[j];
            L0[j] = L1[j];
            L1[j] = L[j];
        }
        // Derivative identity: P_n'(x) = n (P_{n-1}(x) - x P_n(x)) / (1 - x^2)
        Lp[j] = n*(L0[j]-x[j]*L[j])/(1.0-x[j]*x[j]);
    } 
} 
Example 14
double energy( const dvec &q , const dvec &p )
{
    using std::pow;           // runtime pow(x, y) for the fractional powers
    using std::abs;
    using boost::math::pow;   // compile-time pow<N>(x) for the squares
    const size_t N=q.size();
    double e(0.0);
    for( size_t n=0 ; n<N-1 ; ++n )
    {
        e += 0.5*pow<2>( p[n] ) 
            + pow( abs(q[n]) , KAPPA )/KAPPA 
            + pow( abs(q[n]-q[n+1]) , LAMBDA )/LAMBDA;
    }
    e += 0.5*pow<2>( p[N-1] ) 
        + pow( abs(q[N-1]) , KAPPA )/KAPPA 
        + pow( abs(q[N-1]-q[0]) , LAMBDA )/LAMBDA;
    return e;
}
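This is a lattice Hamiltonian with periodic boundary conditions (the final terms outside the loop close the ring), with kappa = KAPPA and lambda = LAMBDA:

\[
E = \sum_{n=0}^{N-1} \left( \tfrac{1}{2} p_n^2
  + \frac{|q_n|^{\kappa}}{\kappa}
  + \frac{|q_n - q_{n+1}|^{\lambda}}{\lambda} \right),
\qquad q_N \equiv q_0.
\]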
Example 15
void QssIntegrator::setState(const dvec& yIn, double tstart_)
{
    assert(yIn.size() == (int) N);
    assert(mathUtils::notnan(yIn));

    // Store the initial values, clipping to 'ymin' where enforcement is enabled.
    for (size_t i=0; i<N; i++) {
        if (enforce_ymin[i]) {
            y[i] = std::max(yIn[i], ymin[i]);
        } else {
            y[i] = yIn[i];
        }
    }

    gcount = 0;
    rcount = 0;
    tstart = tstart_;
    tn = 0.0;
    firstStep = true;
}
Example 16
void printvector(const dvec& v) {
    int n = v.size();
    for(int i=0;i<n;++i) {
        std::cout << v[i] << std::endl;
    }  
} 
Example 17
dvec DNN::predict(const dvec & inputs)
{

    if(inputs.size() != activations.at(0).rows())
        throw std::runtime_error("Input size must equal the number of input neurons");

    feedForward(inputs);

    unsigned int layers = activations.size();

    // Return the output layer (a column matrix) as a flat vector
    return activations[layers-1].col_slice(0,0,activations[layers-1].rows()-1);

}
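A minimal call-site sketch (hypothetical: the DNN constructor and training loop are not shown in these examples, so `net` is assumed to be an already trained instance with a three-neuron input layer):

// Hypothetical usage of DNN::predict
dvec input = {0.2, 0.5, 0.9};       // size must match the input layer
dvec output = net.predict(input);   // throws std::runtime_error otherwise
for (double v : output) std::cout << v << std::endl;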