void NeuralNetwork::trainOn(arma::mat& input, const arma::mat& output, int numIterations, int iterationsBetweenReport)
{
    if (input.n_cols != static_cast<unsigned int>(m_numNeuronsOnLayer[0]) ||
            output.n_cols != static_cast<unsigned int>(m_numNeuronsOnLayer[m_numLayers - 1]))
        throw InvalidInputException("File's input / output length doesn't match the "
                                    "number of neurons on the input / output layer.");

    if (m_featureNormalization)
        normalizeFeatures(input);

    double prevCost = computeCost(input, output, m_theta);
    double crtCost = prevCost;
    for (int iteration = 0; iteration < numIterations; ++iteration)
    {
        if (iterationsBetweenReport)
            if (iteration % iterationsBetweenReport == 0 || iteration + 1 == numIterations)
                std::cout << "Iteration: " << iteration << " | Cost: " << crtCost << std::endl;
        if (crtCost > prevCost)
        {
            std::cout << "The cost is increasing. Choose a smaller learning rate." << std::endl;
            return;
        }
        backprop(input, output);
        prevCost = crtCost;
        crtCost = computeCost(input, output, m_theta);
    }
}
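For orientation, here is a minimal call-site sketch for this trainer, with one training example per row so the column counts line up with the input and output layer sizes checked at the top of trainOn. The header name and the NeuralNetwork constructor taking layer sizes are assumptions, not part of the snippet above.

#include <armadillo>
#include "NeuralNetwork.hpp"   // hypothetical header for the class above

int main()
{
    // Assumed constructor: layer sizes 2-3-1 (input, hidden, output).
    NeuralNetwork net({2, 3, 1});

    // Four XOR-style samples: rows are examples, columns are features / targets.
    arma::mat input  = {{0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0}};  // 4 x 2
    arma::mat output = {{0.0}, {1.0}, {1.0}, {0.0}};                      // 4 x 1

    // Run 1000 iterations and print the cost every 100 of them.
    net.trainOn(input, output, 1000, 100);
    return 0;
}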
// Template parameters are implied by the FeedForward_Network usage; the
// declaration below is added so the snippet is self-contained.
template <typename activation, typename error>
inline void gpu_train_batch(FeedForward_Network<activation, error>& network,
    arma::Mat<float> inputs, arma::Mat<float> targets, int batch_size,
    float learning_rate = 0.8f, float momentum = 0.8f) {

  network.resize_activation(batch_size);
  Raw_FeedForward_Network<activation, error> raw_net = convert_to_raw(network);
  Raw_FeedForward_Network<activation, error> * d_network = network_to_gpu(raw_net);

  // Note: n_rows/batch_size floors away any partial batch, and the extra -1
  // also leaves the final full batch untrained.
  int batches_in_train = targets.n_rows / batch_size - 1;
  for (int i = 0; i < batches_in_train; ++i) {
    arma::Mat<float> input_slice = inputs.rows(i*batch_size, (i+1) * batch_size - 1);

    Raw_Matrix raw_input = to_raw(input_slice);
    Raw_Matrix * d_input = matrix_to_gpu(raw_input);
    int num_trials = input_slice.n_rows;

    calculate_activation(num_trials, network.layer_sizes, d_network, d_input);
    // TODO: reuse this device buffer across batches instead of reallocating it
    free_gpu_matrix(d_input);

    arma::Mat<float> targets_slice = targets.rows(i*batch_size, (i+1) * batch_size - 1);

    Raw_Matrix raw_targets = to_raw(targets_slice);
    Raw_Matrix * d_targets = matrix_to_gpu(raw_targets);

    backprop(num_trials, network.layer_sizes, d_network, d_targets, learning_rate, momentum);
    free_gpu_matrix(d_targets);
  }

  network_to_cpu_free(d_network, raw_net);
  update_from_raw(network, raw_net);

}
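A rough usage sketch follows. The Logistic and Squared_Error types, the FeedForward_Network constructor taking layer sizes, and the randomize_weights helper are all assumptions for illustration; only gpu_train_batch itself comes from the snippet above. The example also shows the batching detail noted in the loop: the trailing rows of the dataset are never visited.

#include <armadillo>

void example_gpu_training_run()
{
    // Hypothetical setup: logistic activation, squared-error cost, 784-64-10 layers.
    FeedForward_Network<Logistic, Squared_Error> net({784, 64, 10});  // assumed ctor
    randomize_weights(net);                                           // assumed helper

    arma::Mat<float> inputs(1024, 784, arma::fill::randu);   // 1024 rows of features
    arma::Mat<float> targets(1024, 10, arma::fill::zeros);   // matching target rows

    // With batch_size = 128, batches_in_train = 1024/128 - 1 = 7,
    // so rows 896..1023 are never trained on by gpu_train_batch.
    gpu_train_batch(net, inputs, targets, /*batch_size=*/128,
                    /*learning_rate=*/0.1f, /*momentum=*/0.9f);
}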
Example #3
void run_benchmark( void *vargs ) {
  struct bench_args_t *args = (struct bench_args_t *)vargs;
  backprop( args->weights1, args->weights2, args->weights3,
            args->biases1,  args->biases2,  args->biases3,
            args->training_data, args->training_targets );
}
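This harness only casts vargs and forwards the buffers to backprop, so the argument struct must expose exactly the fields referenced in the call. A hypothetical layout is sketched below; the element type and the dimension constants are illustrative assumptions, and only the field names are taken from the code above.

// Hypothetical bench_args_t matching the fields used by run_benchmark.
// TYPE and the dimension constants are assumptions for illustration only.
typedef double TYPE;
enum { INPUT_DIM = 13, HIDDEN_DIM = 64, OUTPUT_DIM = 3, TRAIN_SETS = 160 };

struct bench_args_t {
  TYPE weights1[INPUT_DIM * HIDDEN_DIM];    // input layer   -> hidden layer 1
  TYPE weights2[HIDDEN_DIM * HIDDEN_DIM];   // hidden layer 1 -> hidden layer 2
  TYPE weights3[HIDDEN_DIM * OUTPUT_DIM];   // hidden layer 2 -> output layer
  TYPE biases1[HIDDEN_DIM];
  TYPE biases2[HIDDEN_DIM];
  TYPE biases3[OUTPUT_DIM];
  TYPE training_data[TRAIN_SETS * INPUT_DIM];
  TYPE training_targets[TRAIN_SETS * OUTPUT_DIM];
};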