/*
 * Test a set of training data and calculate the MSE
 * (mean square error) over the whole set.
 *
 * Returns 0 immediately when the data dimensions do not
 * match the network's input/output layer sizes.
 */
FANN_EXTERNAL float FANN_API fann_test_data(struct fann *ann, struct fann_train_data *data)
{
	unsigned int sample;

	/* Abort early if network and data dimensions disagree. */
	if(fann_check_input_output_sizes(ann, data) == -1)
		return 0;

	/* Start from a clean error accumulator. */
	fann_reset_MSE(ann);

	for(sample = 0; sample < data->num_data; sample++)
	{
		fann_test(ann, data->input[sample], data->output[sample]);
	}

	return fann_get_MSE(ann);
}
/*
 * Parallel counterpart of fann_test_data: evaluates the whole data set
 * using `threadnumb` OpenMP threads, stores every network output into
 * `predicted_outputs` (resized to num_data x num_output), and returns
 * the MSE merged from all per-thread network copies.
 *
 * Each thread works on its own fann_copy() of `ann`, so the per-sample
 * error accumulation does not race; the copies' MSE counters are summed
 * back into `ann` after the parallel region and the copies destroyed.
 */
float test_data_parallel(struct fann *ann, struct fann_train_data *data, const unsigned int threadnumb, vector< vector<fann_type> >& predicted_outputs)
{
	/* Abort early if network and data dimensions disagree. */
	if(fann_check_input_output_sizes(ann, data) == -1)
		return 0;

	/* One output row per sample, sized to the network's output layer. */
	predicted_outputs.resize(data->num_data,vector<fann_type> (data->num_output));
	fann_reset_MSE(ann);
	vector<struct fann *> ann_vect(threadnumb);
	int i=0,j=0;

	//generate copies of the ann
	omp_set_dynamic(0);
	omp_set_num_threads(threadnumb);
	#pragma omp parallel private(j)
	{
		/* Each thread clones the network once; `i` is implicitly
		 * private as the worksharing loop's iteration variable. */
		#pragma omp for schedule(static)
		for(i=0; i<(int)threadnumb; i++)
		{
			ann_vect[i]=fann_copy(ann);
		}

	//parallel computing of the updates

		#pragma omp for schedule(static)
		for(i = 0; i < (int)data->num_data; ++i)
		{
			/* Pick this thread's private network copy. */
			j=omp_get_thread_num();

			/* fann_test returns a pointer into the copy's own output
			 * buffer, so the values must be copied out before the
			 * next sample overwrites them. */
			fann_type* temp_predicted_output=fann_test(ann_vect[j], data->input[i],data->output[i]);
			for(unsigned int k=0;k<data->num_output;++k)
			{
				predicted_outputs[i][k]=temp_predicted_output[k];
			}

		}
	}

	//merge of MSEs
	/* ann's counters were reset above, so the sum of the copies'
	 * counters is the total over the whole data set. */
	for(i=0;i<(int)threadnumb;++i)
	{
		ann->MSE_value+= ann_vect[i]->MSE_value;
		ann->num_MSE+=ann_vect[i]->num_MSE;
		fann_destroy(ann_vect[i]);
	}
	return fann_get_MSE(ann);
}
/*
 * Descale input and output data based on previously calculated
 * scaling parameters (the inverse of fann_scale_train).
 *
 * Reports FANN_E_SCALE_NOT_PRESENT and returns without touching the
 * data when no scaling parameters have been computed for `ann`.
 */
FANN_EXTERNAL void FANN_API fann_descale_train( struct fann *ann, struct fann_train_data *data )
{
	unsigned sample_idx;

	/* Scaling parameters must have been calculated beforehand. */
	if(ann->scale_mean_in == NULL)
	{
		fann_error( (struct fann_error *) ann, FANN_E_SCALE_NOT_PRESENT );
		return;
	}

	/* Check that we have good training data. */
	if(fann_check_input_output_sizes(ann, data) == -1)
		return;

	for( sample_idx = 0; sample_idx < data->num_data; sample_idx++ )
	{
		fann_descale_input( ann, data->input[ sample_idx ] );
		fann_descale_output( ann, data->output[ sample_idx ] );
	}
}
/*
 * Train for one epoch with the selected training algorithm.
 *
 * Dispatches on ann->training_algorithm and returns the epoch's MSE
 * as reported by the chosen algorithm. Returns 0 when the data
 * dimensions do not match the network, or when the algorithm value
 * is not one of the known training algorithms.
 */
FANN_EXTERNAL float FANN_API fann_train_epoch(struct fann *ann, struct fann_train_data *data)
{
	float epoch_mse = 0;

	if(fann_check_input_output_sizes(ann, data) == -1)
		return 0;

	switch (ann->training_algorithm)
	{
	case FANN_TRAIN_QUICKPROP:
		epoch_mse = fann_train_epoch_quickprop(ann, data);
		break;
	case FANN_TRAIN_RPROP:
		epoch_mse = fann_train_epoch_irpropm(ann, data);
		break;
	case FANN_TRAIN_SARPROP:
		epoch_mse = fann_train_epoch_sarprop(ann, data);
		break;
	case FANN_TRAIN_BATCH:
		epoch_mse = fann_train_epoch_batch(ann, data);
		break;
	case FANN_TRAIN_INCREMENTAL:
		epoch_mse = fann_train_epoch_incremental(ann, data);
		break;
	default:
		/* Unknown algorithm: keep the original's silent 0 return. */
		break;
	}
	return epoch_mse;
}
/*
 * Train for one epoch with the selected training algorithm
 * (OpenCL-aware overload).
 *
 * NOTE(review): only FANN_TRAIN_RPROP actually consumes `fptscl`;
 * every other algorithm falls back to the plain CPU implementation.
 *
 * Returns the epoch's MSE, or 0 when the data dimensions do not match
 * the network or the algorithm value is unrecognized.
 */
FANN_EXTERNAL float FANN_API fann_train_epoch(struct fann *ann, struct fann_train_data *data, struct fpts_cl *fptscl)
{
	float epoch_mse = 0;

	if(fann_check_input_output_sizes(ann, data) == -1)
		return 0;

	switch (ann->training_algorithm)
	{
	case FANN_TRAIN_QUICKPROP:
		epoch_mse = fann_train_epoch_quickprop(ann, data);
		break;
	case FANN_TRAIN_RPROP:
		/* The only OpenCL-accelerated path. */
		epoch_mse = fann_train_epoch_irpropm(ann, data, fptscl);
		break;
	case FANN_TRAIN_SARPROP:
		epoch_mse = fann_train_epoch_sarprop(ann, data);
		break;
	case FANN_TRAIN_BATCH:
		epoch_mse = fann_train_epoch_batch(ann, data);
		break;
	case FANN_TRAIN_INCREMENTAL:
		epoch_mse = fann_train_epoch_incremental(ann, data);
		break;
	default:
		/* Unknown algorithm: keep the original's silent 0 return. */
		break;
	}
	return epoch_mse;
}