double Perceptron::calculate_output(const Vector<double>& inputs) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t size = inputs.size();

   const size_t inputs_number = get_inputs_number();

   if(size != inputs_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Perceptron class.\n"
             << "double calculate_output(const Vector<double>&) const method.\n"
             << "Size must be equal to number of inputs.\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // Calculate output

   return(calculate_activation(calculate_combination(inputs)));
}
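// Usage sketch (not part of the library source): one way the overload above might be
// exercised. The Perceptron(inputs_number) constructor and the Vector(size, value)
// fill-constructor used here are assumed OpenNN signatures, shown for illustration only.

void example_perceptron_output()
{
   OpenNN::Perceptron perceptron(3);                // assumed: a perceptron with 3 inputs

   const OpenNN::Vector<double> inputs(3, 0.5);     // assumed: inputs {0.5, 0.5, 0.5}

   // Combination (weighted sum plus bias) followed by the activation function.
   const double output = perceptron.calculate_output(inputs);

   (void)output;
}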
inline void gpu_train_batch(FeedForward_Network<activation, error>& network,
    arma::Mat<float> inputs, arma::Mat<float> targets, int batch_size,
    float learning_rate = 0.8f, float momentum = 0.8f) {
  network.resize_activation(batch_size);
  Raw_FeedForward_Network<activation, error> raw_net = convert_to_raw(network);
  Raw_FeedForward_Network<activation, error> * d_network = network_to_gpu(raw_net);

  int batches_in_train = targets.n_rows/batch_size - 1;
  for (int i = 0; i < batches_in_train; ++i) {
    arma::Mat<float> input_slice = inputs.rows(i*batch_size, (i+1) * batch_size - 1);
    Raw_Matrix raw_input = to_raw(input_slice);
    Raw_Matrix * d_input = matrix_to_gpu(raw_input);
    int num_trials = input_slice.n_rows;
    calculate_activation(num_trials, network.layer_sizes, d_network, d_input);
    //TODO make this memory shared as to not realloc
    free_gpu_matrix(d_input);

    arma::Mat<float> targets_slice = targets.rows(i*batch_size, (i+1) * batch_size - 1);
    Raw_Matrix raw_targets = to_raw(targets_slice);
    Raw_Matrix * d_targets = matrix_to_gpu(raw_targets);
    backprop(num_trials, network.layer_sizes, d_network, d_targets, learning_rate, momentum);
    free_gpu_matrix(d_targets);
  }
  network_to_cpu_free(d_network, raw_net);
  update_from_raw(network, raw_net);
}
inline arma::Mat<float> gpu_predict(FeedForward_Network<activation, error>& network,
    arma::Mat<float> inputs) {
  network.resize_activation(inputs.n_rows);
  Raw_FeedForward_Network<activation, error> raw_net = convert_to_raw(network);
  Raw_FeedForward_Network<activation, error> * d_network = network_to_gpu(raw_net);

  Raw_Matrix raw_inputs = to_raw(inputs);
  Raw_Matrix * d_inputs = matrix_to_gpu(raw_inputs);
  int num_trials = inputs.n_rows;
  calculate_activation(num_trials, network.layer_sizes, d_network, d_inputs);
  free_gpu_matrix(d_inputs);

  network_to_cpu_free(d_network, raw_net);
  update_from_raw(network, raw_net);
  return network.activations.back();
}
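// Usage sketch (not from the source): how gpu_train_batch and gpu_predict above might
// be driven from host code. The FeedForward_Network constructor taking layer sizes and
// the Sigmoid / Squared_Error policy types are assumptions used only to make the example
// concrete; the Armadillo call (arma::randu) is standard. Note that gpu_train_batch
// expects at least two full batches of rows, since it trains on
// targets.n_rows/batch_size - 1 slices.

inline void example_gpu_train_and_predict() {
  const int num_features = 10;
  const int num_outputs = 2;
  const int num_samples = 1024;

  // Random data stands in for a real training set (rows are samples).
  arma::Mat<float> inputs = arma::randu<arma::Mat<float>>(num_samples, num_features);
  arma::Mat<float> targets = arma::randu<arma::Mat<float>>(num_samples, num_outputs);

  // Assumed constructor: a network with one hidden layer of 32 units.
  FeedForward_Network<Sigmoid, Squared_Error> network({num_features, 32, num_outputs});

  gpu_train_batch(network, inputs, targets, /*batch_size=*/128,
                  /*learning_rate=*/0.1f, /*momentum=*/0.9f);

  arma::Mat<float> predictions = gpu_predict(network, inputs);
  (void)predictions;
}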
double Perceptron::calculate_output(const Vector<double>& inputs, const Vector<double>& parameters) const
{
   // Control sentence (if debug)

   #ifdef __OPENNN_DEBUG__

   const size_t inputs_size = inputs.size();

   const size_t inputs_number = get_inputs_number();

   if(inputs_size != inputs_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Perceptron class.\n"
             << "double calculate_output(const Vector<double>&, const Vector<double>&) const method.\n"
             << "Size of inputs must be equal to number of inputs.\n";

      throw std::logic_error(buffer.str());
   }

   const size_t parameters_size = parameters.size();

   const size_t parameters_number = count_parameters_number();

   if(parameters_size != parameters_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: Perceptron class.\n"
             << "double calculate_output(const Vector<double>&, const Vector<double>&) const method.\n"
             << "Size of potential parameters (" << parameters_size << ") must be equal to number of parameters ("
             << parameters_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   return(calculate_activation(calculate_combination(inputs, parameters)));
}
double Perceptron::calculate_output(const Vector<double>& input)
{
   // Control sentence (if debug)

   #ifdef _DEBUG

   int size = input.get_size();

   if(size != inputs_number)
   {
      std::cerr << "Flood Error: Perceptron class." << std::endl
                << "double calculate_output(const Vector<double>&) method." << std::endl
                << "Size must be equal to number of inputs." << std::endl;

      exit(1);
   }

   #endif

   // Calculate output

   return(calculate_activation(calculate_combination(input)));
}