Example #1
static size_t train_sequence(
        const uint8_t **field_buf,
        const size_t *field_len,
        size_t n_fields,
        size_t n_items,
        real *weights,
        size_t weights_len,
        const label *gold,
        double t,
        double *average_weights,
        int use_dropout,
        feat_hash_t dropout_seed)
{
    /* Decode the sequence with the current weights. */
    label predicted[n_items];
    beam_search(field_buf, field_len, n_fields, n_items,
                weights, weights_len, 0, use_dropout, dropout_seed, predicted);
    if (!memcmp(predicted, gold, n_items*sizeof(label))) return 0;

    /* Count token-level errors for the return value. */
    size_t n_errs = 0;
    size_t i;
    for (i=0; i<n_items; i++)
        n_errs += (gold[i] != predicted[i]);

    /* Perceptron update: reinforce the features of the gold sequence (+1)
     * and penalize those of the prediction (-1); t and average_weights
     * implement weight averaging. */
    adjust_weights(field_buf, field_len, n_fields, n_items,
                   weights, weights_len, gold, (real)1.0,
                   t, average_weights,
                   use_dropout, dropout_seed);
    adjust_weights(field_buf, field_len, n_fields, n_items,
                   weights, weights_len, predicted, (real)(-1.0),
                   t, average_weights,
                   use_dropout, dropout_seed);
    return n_errs;
}
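A caller would typically run this update once per sequence over a whole corpus, advancing the averaging clock t after every update. A minimal sketch of such a driver, assuming hypothetical corpus_t/sentence_t containers (train_sequence and its types come from the snippet above):

/* Hypothetical driver: one averaged-perceptron epoch over a corpus.
 * corpus_t, sentence_t and their fields are illustrative only. */
static size_t train_epoch(corpus_t *corpus, real *weights, size_t weights_len,
                          double *average_weights, double *t)
{
    size_t total_errs = 0;
    size_t s;
    for (s = 0; s < corpus->n_sentences; s++) {
        sentence_t *sent = &corpus->sentences[s];
        total_errs += train_sequence(sent->field_buf, sent->field_len,
                                     sent->n_fields, sent->n_items,
                                     weights, weights_len, sent->gold,
                                     *t, average_weights,
                                     1 /* use_dropout */,
                                     (feat_hash_t)s /* dropout_seed */);
        *t += 1.0;   /* averaging clock: one tick per processed sequence */
    }
    return total_errs;
}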
Example #2
void epoch(map *m, Inputs *in, int iteration, double timeCst, double *epsilon, int numEpoch)
{
    int i;
    double radius;
    int dst;
    double theta;

    /* Neighborhood radius shrinks exponentially over time (floored at 1),
     * then is squared so it can be compared against squared distances. */
    radius = max_dbl(m->mapRadius * exp(-iteration / timeCst), 1);
    radius *= radius;

    /* Learning-rate decay; independent of the lattice loop below
     * (assumes iteration < numEpoch). */
    *epsilon = EPSILON * exp((double)-iteration / (numEpoch - iteration));

    /* Locate the best-matching unit (BMU) for this input. */
    neuron *n = find_bmu(m, in);

    for(i = 0; i < m->latice_size; ++i)
    {
        dst = neuron_distance_to(&(m->lattice[i]), n);

        if(dst < radius)
        {
            /* Gaussian neighborhood: influence decays with distance
             * from the BMU. */
            theta = exp(-dst / (2 * radius));
            adjust_weights(&(m->lattice[i]), in, *epsilon, theta);
        }
    }
}
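epoch() is meant to be driven once per iteration with a (typically randomly chosen) input vector. A minimal sketch of that outer loop, assuming a hypothetical pick_random_input() helper; the time constant follows the usual SOM decay formula lambda = numEpoch / log(initial radius):

#include <math.h>

/* Hypothetical outer training loop for the epoch() above.
 * pick_random_input and NUM-of-inputs handling are illustrative only. */
void train_som(map *m, Inputs *dataset, int n_inputs, int numEpoch)
{
    double timeCst = numEpoch / log(m->mapRadius);
    double epsilon = EPSILON;   /* initial learning rate */
    int iteration;

    for (iteration = 0; iteration < numEpoch; iteration++) {
        Inputs *in = pick_random_input(dataset, n_inputs);
        epoch(m, in, iteration, timeCst, &epsilon, numEpoch);
    }
}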
Example #3
/*!\brief Train a network.
 * \param net Pointer to a neural network.
 *
 * Before calling this routine, net_compute() and
 * net_compute_output_error() should have been called to compute outputs
 * for given inputs and to prepare the neural network for training by
 * computing the output error. This routine performs the actual training
 * by backpropagating the output error through the layers.
 */
void
net_train (network_t *net)
{
  assert (net != NULL);

  backward_pass (net);
  adjust_weights (net);
}
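Put together, the prescribed calling sequence looks like this; a minimal sketch of one online-training step, assuming lwneuralnet-style signatures for net_compute() and net_compute_output_error() (the input/target/output arrays are the caller's):

/* One online training step, in the order the comment above prescribes:
 * forward pass, output-error computation, then backprop + weight update. */
void train_step(network_t *net, const float *input, const float *target,
                float *output)
{
    net_compute(net, input, output);        /* forward pass */
    net_compute_output_error(net, target);  /* prepare the output error */
    net_train(net);                         /* backward_pass + adjust_weights */
}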
Example #4
int MPIDI_CH3_Rendezvous_rget_send_finish(MPIDI_VC_t * vc,
                                     MPIDI_CH3_Pkt_rget_finish_t *rget_pkt)
{
    int mpi_errno = MPI_SUCCESS;
    int complete;
    MPID_Request *sreq;
    MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_RNDV_RGET_SEND_FINISH);
    MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_RNDV_RGET_SEND_FINISH);

    MPID_Request_get_ptr(rget_pkt->sender_req_id, sreq);

    if (!MPIDI_CH3I_MRAIL_Finish_request(sreq)) {
        return MPI_SUCCESS;
    }

    MPIDI_CH3I_MRAILI_RREQ_RNDV_FINISH(sreq);

#if 0
    if (MPIDI_CH3I_RDMA_Process.has_hsam &&
            sreq->mrail.rndv_buf_sz > rdma_large_msg_rail_sharing_threshold) {

        /* Adjust the weights of the different paths according to the
         * timings obtained for the stripes. */

        adjust_weights(vc, sreq->mrail.stripe_start_time,
                sreq->mrail.stripe_finish_time, sreq->mrail.initial_weight);
    }
#endif

    MPIDI_CH3U_Handle_send_req(vc, sreq, &complete);

    if (complete != TRUE)
    {
        mpi_errno = MPIR_Err_create_code(
            mpi_errno,
            MPIR_ERR_FATAL,
            FCNAME,
            __LINE__,
            MPI_ERR_OTHER,
            "**fail",
            0);
        goto fn_exit;
    }

#if defined(CKPT)
    MPIDI_CH3I_CR_req_dequeue(sreq);
#endif /* defined(CKPT) */

fn_exit:
    MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_RNDV_RGET_SEND_FINISH);
    return mpi_errno;
}
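The disabled HSAM block hints at the idea: rails that finished their stripe faster should carry more of the next striped message. One plausible inverse-time reweighting, purely illustrative and not the MVAPICH implementation (n_rails, the timing arrays, and the weight array are all hypothetical):

/* Illustrative only -- NOT the MVAPICH code. Give each rail a share
 * inversely proportional to its measured stripe time, normalized to 1. */
void reweight_rails(int n_rails, const double *stripe_start_time,
                    const double *stripe_finish_time, double *weight)
{
    double inv_sum = 0.0;
    int r;

    for (r = 0; r < n_rails; r++)
        inv_sum += 1.0 / (stripe_finish_time[r] - stripe_start_time[r]);

    for (r = 0; r < n_rails; r++)
        weight[r] = (1.0 / (stripe_finish_time[r] - stripe_start_time[r]))
                    / inv_sum;
}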