/**
 * @method rspamd_fann:train(inputs, outputs)
 * Trains the neural network with a single sample. `inputs` must be a flat
 * table of N numbers and `outputs` a flat table of M numbers, where N and M
 * are the network's input and output counts, e.g. {0, 1, 1} -> {0}.
 * @param {table} inputs input sample (one number per network input)
 * @param {table} outputs desired output sample (one number per network output)
 * @return {boolean} true if the sample was trained, false on size mismatch
 */
static gint lua_fann_train (lua_State *L)
{
#ifndef WITH_FANN
	return 0;
#else
	struct fann *f = rspamd_lua_check_fann (L, 1);
	guint ninputs, noutputs, j;
	fann_type *cur_input, *cur_output;
	gboolean ret = FALSE;

	if (f != NULL) {
		/* First check sanity, call for table.getn for that */
		ninputs = rspamd_lua_table_size (L, 2);
		noutputs = rspamd_lua_table_size (L, 3);

		if (ninputs != fann_get_num_input (f) ||
				noutputs != fann_get_num_output (f)) {
			/* Table sizes must match the network topology exactly */
			msg_err ("bad number of inputs(%d, expected %d) and "
					"output(%d, expected %d) args for train",
					ninputs, fann_get_num_input (f),
					noutputs, fann_get_num_output (f));
		}
		else {
			/* Copy the Lua arrays into C buffers that fann_train() accepts;
			 * Lua arrays are 1-based, hence the j + 1 in lua_rawgeti */
			cur_input = g_malloc (ninputs * sizeof (fann_type));

			for (j = 0; j < ninputs; j ++) {
				lua_rawgeti (L, 2, j + 1);
				cur_input[j] = lua_tonumber (L, -1);
				lua_pop (L, 1);
			}

			cur_output = g_malloc (noutputs * sizeof (fann_type));

			for (j = 0; j < noutputs; j++) {
				lua_rawgeti (L, 3, j + 1);
				cur_output[j] = lua_tonumber (L, -1);
				lua_pop (L, 1);
			}

			/* One incremental training step on this single sample */
			fann_train (f, cur_input, cur_output);
			g_free (cur_input);
			g_free (cur_output);

			ret = TRUE;
		}
	}

	lua_pushboolean (L, ret);

	return 1;
#endif
}
void vTrainThread::run(){ results.clear(); const unsigned int num_input = fann_get_num_input(neural); //Готовим выборку для шага обучения float *data =new float[num_input]; for(int i = 0;i<steps; i++ ){ struct train_result step; memset(&step, 0, sizeof(train_result)); signal->logMessage(DEBUG, QString("Step%1").arg(i)); memset(data, 0, num_input*sizeof(float)); float desired_output = 0.0; QByteArray input = buffer->getBuffer(num_input); QByteArray diff = buffer->getDiff(num_input); for(int i = 0; i<input.size(); i++){ data[i] = static_cast<float>(input.at(i)); desired_output += abs(static_cast<float>(diff.at(i))); } signal->logMessage(DEBUG, QString(" Diff Sum: %1").arg(desired_output)); desired_output /=255; step.need_result = desired_output; signal->logMessage(DEBUG, QString(" need output: %1").arg(desired_output)); float* var = fann_run(neural, data); step.output_before_train = *var; signal->logMessage(DEBUG, QString(" value before: %1").arg(*var)); fann_train(neural, data, &desired_output); var = fann_run(neural, data); step.output_after_train = desired_output; signal->logMessage(DEBUG, QString(" value after: %1").arg(*var)); step.error1 = (step.need_result == 0 && step.output_before_train != 0) ? true : false; step.error2 = (step.need_result != 0 && step.output_before_train == 0) ? 1 : 0; results.append(step); } }
/*
 * Internal helper: one full pass of incremental (online) training over the
 * data set.  The MSE accumulator is cleared first, so the return value is
 * the mean squared error measured during this epoch only.
 */
float fann_train_epoch_incremental(struct fann *ann, struct fann_train_data *data)
{
	unsigned int sample;

	fann_reset_MSE(ann);

	for (sample = 0; sample < data->num_data; sample++) {
		fann_train(ann, data->input[sample], data->output[sample]);
	}

	return fann_get_MSE(ann);
}
// Performs one training step on the wrapped network.
// fann_train() wants mutable buffers, so the caller's const vectors are
// first staged into the reusable member copies and those are passed on.
void Trainer::Train(const InputVector<float>& input_vector,
                    const OutputVector<float>& output_vector) {
  tmp_input_vector_ = input_vector;
  tmp_output_vector_ = output_vector;

  auto* const in = &tmp_input_vector_[0];
  auto* const out = &tmp_output_vector_[0];
  fann_train(ann_, in, out);
}
/**
 * Runs one incremental training step on the network for the given sample.
 *
 * fann_train() takes non-const pointers, so constness is removed with an
 * explicit, greppable const_cast instead of the original C-style casts.
 * NOTE(review): this presumes fann_train does not write through these
 * buffers — consistent with the FANN API, but worth confirming.
 *
 * \param input network input sample (size must match the network's inputs)
 * \param desiredOutput target output value for this sample
 */
void ViFann::train(const float *input, const float &desiredOutput)
{
	fann_train(mNetwork,
			const_cast<float*>(input),
			const_cast<float*>(&desiredOutput));
}