void fann_update_candidate_weights(struct fann *ann, unsigned int num_data) { struct fann_neuron *first_cand = (ann->last_layer - 1)->last_neuron + 1; /* there is an empty neuron between the actual neurons and the candidate neuron */ struct fann_neuron *last_cand = first_cand + fann_get_cascade_num_candidates(ann) - 1; switch (ann->training_algorithm) { case FANN_TRAIN_RPROP: fann_update_weights_irpropm(ann, first_cand->first_con, last_cand->last_con + ann->num_output); break; case FANN_TRAIN_SARPROP: /* TODO: increase epoch? */ fann_update_weights_sarprop(ann, ann->sarprop_epoch, first_cand->first_con, last_cand->last_con + ann->num_output); break; case FANN_TRAIN_QUICKPROP: fann_update_weights_quickprop(ann, num_data, first_cand->first_con, last_cand->last_con + ann->num_output); break; case FANN_TRAIN_BATCH: case FANN_TRAIN_INCREMENTAL: fann_error((struct fann_error *) ann, FANN_E_CANT_USE_TRAIN_ALG); break; } }
/*
 * Run one batch training epoch while optionally dumping the internal delta
 * arrays at increasing verbosity levels (compile-time VERBOSE >= 1/2/3).
 *
 * ann  - the network to train (slopes are lazily initialised on first call
 *        in the non-MIMO build).
 * data - the full training set; every pattern is forward-run, its MSE
 *        accumulated, and errors back-propagated before one weight update.
 * iter - epoch counter, used only for the debug printouts.
 *
 * Returns the mean squared error accumulated over the epoch.
 */
float train_epoch_debug(struct fann *ann, struct fann_train_data* data,
			unsigned int iter)
{
	unsigned int i;
#if VERBOSE>=2
	/* Monotonic dump counter shared across calls (static on purpose). */
	static unsigned int j=0;
#endif

#if ! MIMO_FANN
	/* First call: allocate/clear the slope arrays used by batch updates. */
	if (ann->prev_train_slopes==NULL)
		fann_clear_train_arrays(ann);
#endif

	fann_reset_MSE(ann);

	/* Accumulate slopes over the whole data set (batch semantics). */
	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i]);
		fann_compute_MSE(ann, data->output[i]);
		fann_backpropagate_MSE(ann);
#if ! MIMO_FANN
		fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
#endif
#if VERBOSE>=3
		printf(" ** %d:%d **-AFTER-DELTAS UPDATE-----------------------------------\n",
		       iter, i);
		print_deltas(ann, j++);
#endif
	}

#if VERBOSE>=2
	printf(" ** %d **-BEFORE-WEIGHTS-UPDATE------------------------------------\n",
	       iter);
	print_deltas(ann, j++);
#endif

	/* Single weight update per epoch; algorithm chosen at compile time. */
#if ! MIMO_FANN
#if USE_RPROP
	fann_update_weights_irpropm(ann, 0, ann->total_connections);
#else
	fann_update_weights_batch(ann, data->num_data, 0, ann->total_connections);
#endif
#else /* MIMO_FANN */
	fann_update_weights(ann);
#endif

#if VERBOSE>=1
	printf(" ** %d **-AFTER-WEIGHTS-UPDATE-------------------------------------\n",
	       iter);
	/* NOTE(review): `j` is only declared when VERBOSE>=2, so a build with
	 * VERBOSE==1 would not compile this line — confirm intended VERBOSE use. */
	print_deltas(ann, j++);
#endif

	return fann_get_MSE(ann);
}
/*
 * Internal train function: run a single iRPROP- epoch over the whole
 * training set and return the resulting mean squared error.
 */
float fann_train_epoch_irpropm(struct fann *ann, struct fann_train_data *data)
{
	unsigned int sample;

	/* Lazily set up the slope/step arrays the first time we train. */
	if(ann->prev_train_slopes == NULL)
		fann_clear_train_arrays(ann);

	fann_reset_MSE(ann);

	/* Batch pass: accumulate slopes for every training pattern. */
	for(sample = 0; sample < data->num_data; ++sample)
	{
		fann_run(ann, data->input[sample]);
		fann_compute_MSE(ann, data->output[sample]);
		fann_backpropagate_MSE(ann);
		fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
	}

	/* One iRPROP- update across every connection in the network. */
	fann_update_weights_irpropm(ann, 0, ann->total_connections);

	return fann_get_MSE(ann);
}
float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data) { unsigned int i; fann_reset_MSE(ann); for(i = 0; i < data->num_data; i++) { fann_run(ann, data->input[i]); fann_compute_MSE(ann, data->output[i]); fann_update_slopes_batch(ann, ann->last_layer - 1, ann->last_layer - 1); } switch (ann->training_algorithm) { case FANN_TRAIN_RPROP: fann_update_weights_irpropm(ann, (ann->last_layer - 1)->first_neuron->first_con, ann->total_connections); break; case FANN_TRAIN_SARPROP: fann_update_weights_sarprop(ann, ann->sarprop_epoch, (ann->last_layer - 1)->first_neuron->first_con, ann->total_connections); ++(ann->sarprop_epoch); break; case FANN_TRAIN_QUICKPROP: fann_update_weights_quickprop(ann, data->num_data, (ann->last_layer - 1)->first_neuron->first_con, ann->total_connections); break; case FANN_TRAIN_BATCH: case FANN_TRAIN_INCREMENTAL: fann_error((struct fann_error *) ann, FANN_E_CANT_USE_TRAIN_ALG); } return fann_get_MSE(ann); }
/*
 * Internal train function (OpenCL variant): run one iRPROP- epoch with the
 * per-pattern work dispatched through the OpenCL state in `fptscl`.
 *
 * Side effects: stores `fptscl` in the global `fptsclglob`, installs
 * `sigfunc` as the handler for several fatal signals, resets the
 * all-input/all-output buffer offsets, and flushes the command queue.
 *
 * Returns the epoch MSE — read back via fann_get_MSEcl() in normal builds,
 * or via fann_get_MSE() when DEBUGCL is defined.
 */
float fann_train_epoch_irpropm(struct fann *ann, struct fann_train_data *data,
			       struct fpts_cl *fptscl)
{
	/* Publish the OpenCL context globally so sigfunc() can reach it. */
	fptsclglob = fptscl;
	unsigned int i, count;
	/* Install a common handler for crash/termination signals — presumably
	 * sigfunc performs OpenCL cleanup/diagnostics; confirm its contract. */
	signal(SIGSEGV, sigfunc);
	signal(SIGFPE, sigfunc);
	signal(SIGINT, sigfunc);
	signal(SIGTERM, sigfunc);
	signal(SIGHUP, sigfunc);
	signal(SIGABRT, sigfunc);
	/* NOTE(review): `count`, `err`, `truesize`, `val`, `global_size`,
	 * `local_size` and `offset` are only used by commented-out debug code
	 * below — candidates for removal. */
	cl_int err;
	size_t truesize;
	if(ann->prev_train_slopes == NULL)
	{
		fann_clear_train_arrays(ann, fptscl);
	}
	fann_reset_MSE(ann);
	fann_type val = 0.0;
	size_t global_size[2], local_size[2], offset[2];
	/* Zero the per-output MSE accumulator on the device. */
	clearclarray(&fptscl->MSE_values, ann->num_output, fptscl);
	clFlush(fptscl->hardware.queue);
	//clFinish(fptscl->hardware.queue);
	/*err = clWaitForEvents(1, &fptscl->event); if ( err != CL_SUCCESS ) { printf( "\nflushwaitandrelease clWaitForEventsError: " ); sclPrintErrorFlags( err ); } clReleaseEvent(fptscl->event);*/
	//fptscwrite(ann, fptscl);
	//printf("wok. Enter RPROP train. fptscl->software_mulsum = %s, %d\n", fptscl->software_mulsum.kernelName, fptscl->hardware.deviceType);
	/* Rewind the packed input/output buffer cursors for this epoch. */
	fptscl->allinput_offset = 0;
	fptscl->alloutput_offset = 0;
	for(i = 0; i < data->num_data; i++)
	{
		fann_run(ann, data->input[i], fptscl);
#ifdef DEBUGCL
		printf("%c[%d;%dm%d run Ok.%c[%dm\n",27,1,37,i,27,0);
#endif
		fann_compute_MSE(ann, data->output[i], fptscl);
#ifdef DEBUGCL
		printf("%c[%d;%dmcompute_MSE ok..%c[%dm\n",27,1,37,27,0);
		//if(i>=18) sigfunc (0);
#endif
		/* NOTE(review): unconditional sigfunc(0) on every pattern looks
		 * like a debug leftover (cf. the commented guard above) — verify
		 * whether it is intended in production builds. */
		sigfunc (0);
		fann_backpropagate_MSE(ann, fptscl);
#ifdef DEBUGCL
		printf("%c[%d;%dmbackpropagate_MSE ok..%c[%dm\n",27,1,37,27,0);
		//1!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#endif
		fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1, fptscl);
#ifdef DEBUGCL
		printf("%c[%d;%dmUpdate slopes ok---------------------------------------------------%c[%dm\n",27,1,37,27,0);
#endif
		/* Push queued kernels to the device before the next pattern. */
		clFlush(fptscl->hardware.queue);
#ifdef DEBUGCL
#endif
	}
	/* Single iRPROP- weight update over all connections for this epoch. */
	fann_update_weights_irpropm(ann, 0, ann->total_connections, fptscl);
	//sigfunc (0);
#ifdef DEBUGCL
	/* err = clGetCommandQueueInfo(fptscl->hardware.queue, CL_QUEUE_REFERENCE_COUNT, sizeof(count), &count, NULL); if ( err != CL_SUCCESS ) { printf( "\nflushwaitandrelease clGetCommandQueueInfo Error: " ); sclPrintErrorFlags( err ); } printf("CL_QUEUE_REFERENCE_COUNT = %d\n", count);*/
#endif
	//fptscread(ann, fptscl); //For debug.1!!
	#ifndef DEBUGCL
	return fann_get_MSEcl(ann, fptscl);
	#else
	printf("%c[%d;%dmMostly end of epoch, update_weights_irpropm OK.------------------------------------------%c[%dm\n",27,1,37,27,0);
	return fann_get_MSE(ann);
	#endif
}