void NeuralNetwork::CalculateNeuronActivation(size_t i)
{
    double x = m_neurons[i].m_activesum;
    m_neurons[i].m_activesum = 0;

    // Apply the activation function
    double y = 0.0;
    switch (m_neurons[i].m_activation_function_type)
    {
        case SIGNED_SIGMOID:   y = af_sigmoid_signed(x, m_neurons[i].m_a, m_neurons[i].m_b);   break;
        case UNSIGNED_SIGMOID: y = af_sigmoid_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b); break;
        case TANH:             y = af_tanh(x, m_neurons[i].m_a, m_neurons[i].m_b);             break;
        case TANH_CUBIC:       y = af_tanh_cubic(x, m_neurons[i].m_a, m_neurons[i].m_b);       break;
        case SIGNED_STEP:      y = af_step_signed(x, m_neurons[i].m_b);                        break;
        case UNSIGNED_STEP:    y = af_step_unsigned(x, m_neurons[i].m_b);                      break;
        case SIGNED_GAUSS:     y = af_gauss_signed(x, m_neurons[i].m_a, m_neurons[i].m_b);     break;
        case UNSIGNED_GAUSS:   y = af_gauss_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b);   break;
        case ABS:              y = af_abs(x, m_neurons[i].m_b);                                break;
        case SIGNED_SINE:      y = af_sine_signed(x, m_neurons[i].m_a, m_neurons[i].m_b);      break;
        case UNSIGNED_SINE:    y = af_sine_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b);    break;
        case SIGNED_SQUARE:    y = af_square_signed(x, m_neurons[i].m_a, m_neurons[i].m_b);    break;
        case UNSIGNED_SQUARE:  y = af_square_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b);  break;
        case LINEAR:           y = af_linear(x, m_neurons[i].m_b);                             break;
        default:               y = af_sigmoid_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b); break;
    }
    m_neurons[i].m_activation = y;
}
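// The membrane update in ActivateLeaky() below is the forward-Euler
// discretization of the usual leaky-integrator ODE
//
//     tau * dV/dt = -V + I(t)
//
// where tau is m_timeconst and I is the summed incoming signal m_activesum.
// With step size dt = a_dtime and t_const = dt / tau, one Euler step gives
//
//     V <- (1 - t_const) * V + t_const * I
//
// which is exactly the per-neuron m_membrane_potential update performed
// below. The step is numerically stable for 0 < t_const < 2 and
// non-oscillatory for t_const <= 1, i.e. a_dtime <= m_timeconst.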
void NeuralNetwork::ActivateLeaky(double a_dtime)
{
    // Loop the connections. Calculate each connection's output signal.
    for (unsigned int i = 0; i < m_connections.size(); i++)
    {
        m_connections[i].m_signal =
            m_neurons[m_connections[i].m_source_neuron_idx].m_activation * m_connections[i].m_weight;
    }

    // Loop the connections again, this time adding the signals to the target
    // neurons. This largely requires out-of-order memory writes; it is the
    // only loop where that happens.
    for (unsigned int i = 0; i < m_connections.size(); i++)
    {
        m_neurons[m_connections[i].m_target_neuron_idx].m_activesum += m_connections[i].m_signal;
    }

    // The leaky-integrator step: blend each neuron's membrane potential with
    // its summed input, weighted by t_const = a_dtime / m_timeconst (one
    // forward-Euler step). Inputs are skipped; they have no internal dynamics.
    for (unsigned int i = m_num_inputs; i < m_neurons.size(); i++)
    {
        double t_const = a_dtime / m_neurons[i].m_timeconst;
        m_neurons[i].m_membrane_potential =
            (1.0 - t_const) * m_neurons[i].m_membrane_potential + t_const * m_neurons[i].m_activesum;
    }

    // Pass each membrane potential (plus bias) through the neuron's activation
    // function and store the result in m_activation. Inputs are skipped here
    // as well, since they do not get an activation.
    for (unsigned int i = m_num_inputs; i < m_neurons.size(); i++)
    {
        double x = m_neurons[i].m_membrane_potential + m_neurons[i].m_bias;
        m_neurons[i].m_activesum = 0;

        // Apply the activation function (the same dispatch as in
        // CalculateNeuronActivation, but driven by the membrane potential).
        double y = 0.0;
        switch (m_neurons[i].m_activation_function_type)
        {
            case SIGNED_SIGMOID:   y = af_sigmoid_signed(x, m_neurons[i].m_a, m_neurons[i].m_b);   break;
            case UNSIGNED_SIGMOID: y = af_sigmoid_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b); break;
            case TANH:             y = af_tanh(x, m_neurons[i].m_a, m_neurons[i].m_b);             break;
            case TANH_CUBIC:       y = af_tanh_cubic(x, m_neurons[i].m_a, m_neurons[i].m_b);       break;
            case SIGNED_STEP:      y = af_step_signed(x, m_neurons[i].m_b);                        break;
            case UNSIGNED_STEP:    y = af_step_unsigned(x, m_neurons[i].m_b);                      break;
            case SIGNED_GAUSS:     y = af_gauss_signed(x, m_neurons[i].m_a, m_neurons[i].m_b);     break;
            case UNSIGNED_GAUSS:   y = af_gauss_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b);   break;
            case ABS:              y = af_abs(x, m_neurons[i].m_b);                                break;
            case SIGNED_SINE:      y = af_sine_signed(x, m_neurons[i].m_a, m_neurons[i].m_b);      break;
            case UNSIGNED_SINE:    y = af_sine_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b);    break;
            case SIGNED_SQUARE:    y = af_square_signed(x, m_neurons[i].m_a, m_neurons[i].m_b);    break;
            case UNSIGNED_SQUARE:  y = af_square_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b);  break;
            case LINEAR:           y = af_linear(x, m_neurons[i].m_b);                             break;
            default:               y = af_sigmoid_unsigned(x, m_neurons[i].m_a, m_neurons[i].m_b); break;
        }
        m_neurons[i].m_activation = y;
    }
}
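// A minimal usage sketch, not part of the original source: it drives the
// leaky network for a fixed number of Euler steps and then reads the outputs.
// It assumes the class also exposes Input() and Output() for loading sensor
// values and reading output activations (as in MultiNEAT's NeuralNetwork);
// if this class differs, treat those two names as placeholders. The dt and
// step count below are illustrative values only. Requires <vector>.
static std::vector<double> RunLeakySteps(NeuralNetwork& net,
                                         std::vector<double> a_inputs,
                                         double a_dt, int a_steps)
{
    for (int s = 0; s < a_steps; ++s)
    {
        net.Input(a_inputs);      // load the input neurons (assumed API)
        net.ActivateLeaky(a_dt);  // one leaky-integrator step, defined above
    }
    return net.Output();          // read the output activations (assumed API)
}
// e.g.: std::vector<double> out = RunLeakySteps(net, {0.5, 1.0}, 0.05, 100);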