// Evaluates the network on `input`: one hidden-layer pass followed by the
// output-layer pass. Requires a non-empty hidden layer whose weight rows
// match the input dimension.
Vector NeuralNetwork::Run( const Vector& input ) const
{
    assert( mHiddenLayerWeights.size() > 0 && input.size() == mHiddenLayerWeights[ 0 ].size() );
    const Vector hidden = FeedForward( input, mHiddenLayerBias, mHiddenLayerWeights );
    return FeedForward( hidden, mOutputLayerBias, mOutputLayerWeights );
}
// Recursively evaluates `node`: first brings every input node up to date,
// then runs the node's own layer. `flag_ff_visited` guards against
// re-evaluating nodes that are shared between paths in the graph.
void NetGraph::FeedForward(NetGraphNode* node) {
	if (!node->flag_ff_visited) {
		// Make sure all input nodes have valid outputs (post-order traversal).
		// Take connections by const reference — the original copied each
		// NetGraphConnection per iteration.
		for (const NetGraphConnection& connection : node->input_connections)
			FeedForward(connection.node);
		PrepareNode(node);
		// Call the Layer::FeedForward method and set the visited flag
		node->layer->FeedForward();
		if (layerview_enabled_) {
			for (const NetGraphBuffer& buffer : node->output_buffers) {
				// The caption depends only on the node and the buffer, so build it
				// once instead of once per (sample, map) pair as before.
				std::stringstream ss;
				ss << node->unique_name << ": " << node->layer->GetLayerDescription()
				   << ", buffer " << buffer.description;
				const std::string caption = ss.str();
#ifdef BUILD_OPENCL
				// Make the tensor host-visible once per buffer before viewing.
				// NOTE(review): assumes MoveToCPU() is idempotent and nothing in
				// viewer.show() moves the data back to the device — confirm.
				buffer.combined_tensor->data.MoveToCPU();
#endif
				for (unsigned int sample = 0; sample < buffer.combined_tensor->data.samples(); sample++) {
					for (unsigned int map = 0; map < buffer.combined_tensor->data.maps(); map++) {
						viewer.show(&(buffer.combined_tensor->data), caption, false, map, sample);
					}
				}
			}
		}
		node->flag_ff_visited = true;
	}
}
// Performs one step of gradient descent on a single (input, output)
// training pair and returns the squared-error cost for that pair.
//
// NOTE(review): the gradient algebra depends on this project's Matrix
// operator conventions (dimensions of Weights[i], Transpose semantics),
// which are not visible here — the update rules below are documented as
// written, not independently verified.
double FFANN::TrainWithBackPropagation(Matrix input, Matrix output, double learning_rate)
{
	// Forward pass: per-layer activations; last entry is the network output.
	std::vector<Matrix> outputs = FeedForward(input);
	std::vector<Matrix> temp_deltas; //layer deltas stored backwards in order

	//calculate cost function
	// cost = 0.5 * sum((target - prediction)^2)
	double cost = 0.0f;
	Matrix partial_cost_matrix(Dimensions[Num_Layers - 1], 1);
	partial_cost_matrix = output + (outputs[outputs.size() - 1] * -1);
	for (int i = 0; i < partial_cost_matrix.Elements.size(); i++)
	{
		cost += 0.5f * partial_cost_matrix.Elements[i] * partial_cost_matrix.Elements[i];
	}

	//calculate last layer deltas
	// lld = (prediction - target), scaled element-wise below by the
	// activation derivative a*(1-a) (sigmoid assumed from the formula).
	Matrix lld(Dimensions[Num_Layers - 1], 1);
	lld = outputs[outputs.size() - 1] + (output * -1);
	for (int i = 0; i < lld.Dimensions[0]; i++)
	{
		double a = outputs[outputs.size() - 1].Elements[i];
		lld.Elements[i] *= a * (1 - a); //derivative of activation function
	}
	temp_deltas.push_back(lld);

	//calculate the rest of the deltas through back propagation
	int j = 0; //this keeps track of the index for the next layer's delta
	for (int i = Num_Layers - 2; i >= 0; i--) //start at the second to last layer
	{
		Matrix delta(Dimensions[i], 1);
		// Propagate the next layer's delta backwards through its weights.
		delta = Weights[i + 1] * temp_deltas[j];
		j++;
		for (int k = 0; k < delta.Dimensions[0]; k++)
		{
			double a = outputs[i].Elements[k];
			delta.Elements[k] *= a * (1 - a); //derivative of activation function
		}
		temp_deltas.push_back(delta);
	}

	//put the deltas into a new vector object in the correct order
	std::vector<Matrix> deltas;
	for (int i = (int)temp_deltas.size() - 1; i >= 0; i--)
	{
		deltas.push_back(temp_deltas[i]);
	}

	//update biases
	// b_i <- b_i - learning_rate * delta_i
	for (int i = 0; i < Biases.size(); i++)
	{
		Biases[i] = Biases[i] + deltas[i] * (-1.0f * learning_rate);
	}

	//update weights
	// W_i <- W_i - learning_rate * (a_{i-1} * delta_i^T)
	// NOTE(review): this loop starts at i = 1, so Weights[0] is never
	// updated — confirm whether the first weight matrix is intentionally
	// frozen or whether the raw input needs to be handled as outputs[-1].
	for (int i = 1; i < Weights.size(); i++)
	{
		Weights[i] = Weights[i] + ((outputs[i - 1] * deltas[i].Transpose()) * (-1.0f * learning_rate));
	}
	return cost;
}
// Runs a forward pass over every node in `nodes`. When `clear_flag` is set,
// all visited markers are reset first so the whole graph is re-evaluated.
void NetGraph::FeedForward(std::vector<NetGraphNode*>& nodes, bool clear_flag) {
	if (clear_flag) {
		// Invalidate previous results before re-running the pass.
		for (NetGraphNode* n : nodes)
			n->flag_ff_visited = false;
	}
	// The per-node overload recurses into each node's dependencies.
	for (NetGraphNode* n : nodes)
		FeedForward(n);
}
// Threads `input` through every layer in sequence and returns the final
// activation vector. The network must contain at least one layer.
Vector MultiLayerPerceptron::Compute( const Vector& input ) const
{
    assert( mLayers.size() );
    Vector activation = input;
    for( const Layer& layer : mLayers )
        activation = FeedForward( activation, layer );
    return activation;
}
// Trains the embedding on a single sample: runs a forward pass over the
// context words (`input_nodes`), backpropagates through each output node,
// and then applies the accumulated hidden-layer error to every context
// word's embedding row (AdaGrad or plain SGD depending on options).
//
// `local_hidden_act` / `local_hidden_err` are caller-provided scratch
// buffers of at least option_->embeding_size reals; both are zeroed here.
void WordEmbedding::TrainSample(std::vector<int>& input_nodes, std::vector<std::pair<int, int> >& output_nodes,
    void *local_hidden_act, void *local_hidden_err)
{
    real* hidden_act = (real*)local_hidden_act;
    real* hidden_err = (real*)local_hidden_err;
    assert(hidden_act != nullptr);
    assert(hidden_err != nullptr);
    // Reset the scratch buffers for this sample.
    memset(hidden_act, 0, option_->embeding_size * sizeof(real));
    memset(hidden_err, 0, option_->embeding_size * sizeof(real));
    // Hidden activation = aggregate of the context-word embeddings.
    FeedForward(input_nodes, hidden_act);
    // Backpropagate each output node; BPOutputLayer accumulates the
    // hidden-layer error into hidden_err as a side effect.
    // NOTE(review): pair is presumably (node_id, code) from a hierarchical
    // softmax / negative-sampling tree — confirm against the caller.
    for (int i = 0; i < output_nodes.size(); ++i)
    {
        int &node_id = output_nodes[i].first;
        int &code = output_nodes[i].second;
        BPOutputLayer(code, node_id, weight_EO_[node_id], hidden_act, hidden_err);
    }
    if (option_->use_adagrad)
    {
        //Update context embedding
        // AdaGrad: per-coordinate learning rate init_lr / sqrt(sum g^2).
        for (int i = 0; i < input_nodes.size(); ++i)
        {
            int &node_id = input_nodes[i];
            real* input_embedding_row = weight_IE_[node_id];
            real* sum_gradient2_row = sum_gradient2_IE_[node_id];
            assert(input_embedding_row != nullptr && sum_gradient2_row != nullptr);
            for (int j = 0; j < option_->embeding_size; ++j)
            {
                sum_gradient2_row[j] += hidden_err[j] * hidden_err[j];
                // Guard against division by (near) zero accumulators.
                if (sum_gradient2_row[j] > 1e-10)
                    input_embedding_row[j] += hidden_err[j] * option_->init_learning_rate / sqrt(sum_gradient2_row[j]);
            }
        }
    }
    else
    {
        // Plain SGD: pre-scale the error by the global learning rate once,
        // then add it to every context word's embedding row.
        for (int j = 0; j < option_->embeding_size; ++j)
            hidden_err[j] *= learning_rate;
        //Update context embedding
        for (int i = 0; i < input_nodes.size(); ++i)
        {
            int &node_id = input_nodes[i];
            real* input_embedding = weight_IE_[node_id];
            assert(input_embedding != nullptr);
            for (int j = 0; j < option_->embeding_size; ++j)
                input_embedding[j] += hidden_err[j];
        }
    }
}
// Evaluates the configured cost function for `input` against the one-hot
// encoding of `label`.
//
// Returns the scalar cost; which formula is used depends on fCostFunction.
double NeuralNetwork::EvaluateCost(std::vector<double> input, int label){
  arma::vec outputNN = FeedForward(input);
  // Build the one-hot target vector for the requested label.
  arma::vec trueoutput;
  trueoutput.zeros(neurons[neurons.size()-1]);
  trueoutput[label] = 1;
  double cost = 0;
  // BUGFIX: the two branches were swapped — the quadratic (MSE-style) cost
  // was computed under kCE, and the cross-entropy formula under kMSE.
  if (fCostFunction == kMSE)
    cost = 0.5 * arma::norm(outputNN - trueoutput, 2);
  if (fCostFunction == kCE)
    cost = arma::accu(-trueoutput % arma::log(outputNN) - (1-trueoutput) % arma::log(1-outputNN));
  return cost;
}
/* Runs the training loop: EPOCHS passes over N_IMGS images, feeding each
 * image forward through the network. Always returns 1.
 *
 * NOTE(review): backprop is commented out below, so this currently performs
 * forward passes only — no weights are updated.
 * NOTE(review): this function free()s the caller-supplied netout/hidout
 * buffers on exit — confirm callers expect ownership to transfer here.
 */
int trainNet(double *netout, double *hidout)
{
	int i,j;
	struct png_d *pd;
	for (i=0; i<EPOCHS; i++) {
		for (j=0; j<N_IMGS; j++) {
			/* NOTE(review): the same hard-coded file is decoded on every
			 * iteration — presumably a placeholder for a per-image path. */
			pd = readpngtobitmap("sample.png");
			FeedForward(pd->image, pd->width, pd->height, pd->channels, pd->bit_depth, hidout, netout);
			//BackProp(pd->image, p[j][1], hidout, netout);
			free(pd->image);
			free(pd);
		}
	}
	free(netout);
	free(hidout);
	return 1;
}
// ################################################ // Gives true when NN output is the same as label // bool NeuralNetwork::Evaluate(int inputNr, MNISTReader reader){ bool NeuralNetwork::Evaluate(std::vector<double> input, int label){ // std::cout << "Evaluation" << std::endl; arma::vec temp = FeedForward(input); // std::cout << temp; int counter = -2; double max = -1; for (unsigned int i = 0; i < temp.size(); ++i){ if (temp.at(i) > max){ max = temp.at(i); counter = i; } } // const int element = std::round(FeedForward(input).); if (counter == label) { // std::cout << "Correctly identified! Output element Nr " << counter << std::endl; return true; } else { // std::cout << "Wrongly identified! Output element Nr " << counter << std::endl; return false; } }
bool NeuralNet::GetNetOutput(const Type *inputs, int output_id, Type *output) { // validate output id if (output_id < 0 || output_id >= out_cnt_) { return false; } // call the fast version in case of readonly nets if (read_only_) { return FastGetNetOutput(inputs, output_id, output); } // For the slow version, we'll just call FeedForward and return the // appropriate output vector <Type> outputs(out_cnt_); if (!FeedForward(inputs, &outputs[0])) { return false; } (*output) = outputs[output_id]; return true; }
//Prueba el funcionamiento de la red void RedNeuronal::TestRed(void) { FeedForward(); }
//Entrena la red con los patrones de entrada y los patrones de salida //en modo de obtener como respuesta los patrones de salida void RedNeuronal::TrainRed(void) { FeedForward(); ComputeError(); Backpropagate(); }
// One processing step: forward pass, error calculation, weight update.
// (BackPropogate spelling follows the project's existing API.)
void NeuralNet::Process()
{
	FeedForward();
	CalcError();
	BackPropogate();
}
// Convenience overload: feeds `input_vector` through the entire network,
// i.e. up to and including the final layer.
// NOTE(review): assumes _layers is non-empty — _layers.size() - 1 would
// wrap around for an empty network; confirm callers guarantee this.
void MultilayerPerceptron::FeedForward(float* input_vector, float* output_vector) const {
	const auto last_layer = _layers.size() - 1;
	FeedForward(input_vector, output_vector, last_layer);
}
// Full forward pass over the whole graph: clears every node's visited
// flag and then evaluates all nodes.
void NetGraph::FeedForward() {
	FeedForward(nodes_, true);
}