Example #1
0
// Constructs a two-layer feed-forward network: an input->hidden layer and a
// hidden->output layer, each with its own activation function. Layers are
// registered with add_layer() in firing order (hidden first, then output).
NeuralNet::NeuralNet(size_t no_inputs, size_t no_hidden, size_t no_outputs, ActivationFunc& hidden_act, ActivationFunc& output_act)
{
    NNLayer input_to_hidden(no_inputs, no_hidden, hidden_act);
    NNLayer hidden_to_output(no_hidden, no_outputs, output_act);
    add_layer(input_to_hidden);
    add_layer(hidden_to_output);
}
// Checks a single forward pass through a 3-input / 2-output weight matrix and
// then verifies the weight and bias partial derivatives computed during
// backpropagation against hand-calculated values.
void TestWeightMatrix_Backward() {
  // Tolerance for floating-point comparisons: the expected values are sums of
  // products whose operands (0.8, 0.3, 2.3, ...) are not exactly representable
  // in binary, so exact == comparison is unreliable.
  double epsilon = 1e-3;

  int n_outputs = 2;
  int n_inputs = 3;
  InputLayer input_layer(n_inputs);
  OutputLayer output_layer(n_outputs);

  // 3x2 weight matrix plus one bias per output unit.
  WeightMatrix weights(input_layer, output_layer);
  weights.set(0, 0, 0.5);
  weights.set(1, 0, -2.0);
  weights.set(2, 0, 1.5);
  weights.set(0, 1, 1.0);
  weights.set(1, 1, 0.7);
  weights.set(2, 1, -1.0);
  weights.setBias(0, 0.8);
  weights.setBias(1, -0.3);

  std::vector<double> inputs;
  inputs.push_back(-2.0);
  inputs.push_back(1.0);
  inputs.push_back(3.0);
  input_layer.receiveInput(inputs);

  // Forward pass. Expected pre-activations:
  //   out0 = -2*0.5 + 1*(-2.0) + 3*1.5 + 0.8 =  2.3
  //   out1 = -2*1.0 + 1*0.7   + 3*(-1.0) - 0.3 = -4.6
  std::vector<double> transition = weights.fire(input_layer);
  output_layer.receiveInput(transition);

  // Compare within epsilon rather than with == (float equality is fragile for
  // accumulated sums; the other assertions in this test already do the same).
  assert(output_layer.getInput(0) > 2.3 - epsilon &&
         output_layer.getInput(0) < 2.3 + epsilon);
  assert(output_layer.getInput(1) > -4.6 - epsilon &&
         output_layer.getInput(1) < -4.6 + epsilon);

  // backpropagation step: target outputs are (1.0, 1.0)
  std::vector<double> actual_outputs;
  actual_outputs.push_back(1.0);
  actual_outputs.push_back(1.0);

  output_layer.computeGradient(actual_outputs);
  weights.computeGradient(input_layer, output_layer);

  // dE/dw(i, j) — weight gradients for output unit 0.
  assert(weights.getPartialDerivative(0, 0) > -1.3 - epsilon &&
         weights.getPartialDerivative(0, 0) < -1.3 + epsilon);
  assert(weights.getPartialDerivative(1, 0) > 0.65 - epsilon &&
         weights.getPartialDerivative(1, 0) < 0.65 + epsilon);
  assert(weights.getPartialDerivative(2, 0) > 1.95 - epsilon &&
         weights.getPartialDerivative(2, 0) < 1.95 + epsilon);

  // Weight gradients for output unit 1.
  assert(weights.getPartialDerivative(0, 1) > 5.6 - epsilon &&
         weights.getPartialDerivative(0, 1) < 5.6 + epsilon);
  assert(weights.getPartialDerivative(1, 1) > -2.8 - epsilon &&
         weights.getPartialDerivative(1, 1) < -2.8 + epsilon);
  assert(weights.getPartialDerivative(2, 1) > -8.4 - epsilon &&
         weights.getPartialDerivative(2, 1) < -8.4 + epsilon);

  // Bias gradients.
  assert(weights.getBiasPartialDerivative(0) < 0.65 + epsilon &&
         weights.getBiasPartialDerivative(0) > 0.65 - epsilon);
  assert(weights.getBiasPartialDerivative(1) < -2.8 + epsilon &&
         weights.getBiasPartialDerivative(1) > -2.8 - epsilon);

  printPass("TestWeightMatrix_Backward()");
}
Example #3
0
File: ANN_2.cpp  Project: jefg89/neural
//------------------ANN construction------------------------------------------
// Runs one forward pass through the three-stage network:
// a single input In feeds the input layer, whose activation is fanned out to
// N hidden units (weights w_ji, outputs y_pj), which in turn feed one output
// unit (weights w_kj, bias b); the final activation is returned through y_pk.
void ANN (int N,float In, float * w_ji, float  * w_kj, float b, float * y_pj,float & y_pk)  {
    // Activation leaving the input layer.
    float input_activation;

    input_layer(In, input_activation);
    hidden_layer(N, input_activation, w_ji, y_pj);
    output_layer(N, y_pj, w_kj, b, y_pk);
}
Example #4
0
/******************************************************************************
* DXF output function.
*
* Writes an AutoCAD R12 (AC1009) DXF stream to dxf_file via the OUT_LINE /
* OUT1 macros: a HEADER section declaring the drawing extents
* ($EXTMIN/$EXTMAX from llx,lly / urx,ury), then layer output, then an
* ENTITIES section containing the traced splines, then EOF. The name, opts,
* msg_func, msg_data and user_data parameters are accepted for the common
* writer interface but are not used here. Always returns 0 (success).
*/
int output_dxf12_writer(FILE* dxf_file, gchar* name,
			int llx, int lly, int urx, int ury, 
			at_output_opts_type * opts,
			spline_list_array_type shape,
			at_msg_func msg_func, 
			gpointer msg_data,
			gpointer user_data)
{
  /* HEADER section: group code 0/SECTION, 2/HEADER, then header variables
     introduced by group code 9. */
  OUT_LINE ("  0");
  OUT_LINE ("SECTION");
  OUT_LINE ("  2");
  OUT_LINE ("HEADER");
  OUT_LINE ("  9");
  OUT_LINE ("$ACADVER");
  OUT_LINE ("  1");
  OUT_LINE ("AC1009");           /* AC1009 = AutoCAD Release 12 file format */
  /* $EXTMIN: lower-left corner of the drawing extents (codes 10/20/30 are
     the X/Y/Z components; Z is fixed at 0 for this 2-D output). */
  OUT_LINE ("  9");
  OUT_LINE ("$EXTMIN");
  OUT_LINE ("  10");
  OUT1     (" %f\n", (double)llx);
  OUT_LINE ("  20");
  OUT1     (" %f\n", (double)lly);
  OUT_LINE ("  30");
  OUT_LINE (" 0.000000");
  /* $EXTMAX: upper-right corner of the drawing extents. */
  OUT_LINE ("  9");
  OUT_LINE ("$EXTMAX");
  OUT_LINE ("  10");
  OUT1     (" %f\n", (double)urx);
  OUT_LINE ("  20");
  OUT1     (" %f\n", (double)ury);
  OUT_LINE ("  30");
  OUT_LINE (" 0.000000");
  OUT_LINE ("  0");
  OUT_LINE ("ENDSEC");

  /* Presumably emits the layer table(s) for the shapes between the HEADER
     and ENTITIES sections — TODO confirm against output_layer's definition. */
  output_layer(dxf_file, shape);

  /* ENTITIES section: the actual geometry. */
  OUT_LINE ("  0");
  OUT_LINE ("SECTION");
  OUT_LINE ("  2");
  OUT_LINE ("ENTITIES");

  out_splines(dxf_file, shape);

  OUT_LINE ("  0");
  OUT_LINE ("ENDSEC");
  OUT_LINE ("  0");
  OUT_LINE ("EOF");
  return 0;
}
// Checks a forward pass from a hidden layer through a 3x2 weight matrix and
// then verifies the hidden layer's partial derivatives computed during
// backpropagation against hand-calculated values.
void TestHiddenLayer_Backward() {
  // Tolerance for floating-point comparisons: exact == on computed doubles is
  // unreliable because the summed products involve values (0.8, 0.3, ...)
  // that are not exactly representable in binary.
  double epsilon = 1e-3;

  int n_outputs = 2;
  int n_inputs = 3;
  HiddenLayer hidden_layer(n_inputs);
  OutputLayer output_layer(n_outputs);

  std::vector<double> hidden_values;
  hidden_values.push_back(-2.0);
  hidden_values.push_back(1.0);
  hidden_values.push_back(3.0);

  // 3x2 weight matrix plus one bias per output unit.
  WeightMatrix weights(hidden_layer, output_layer);
  weights.set(0, 0, 0.5);
  weights.set(1, 0, -2.0);
  weights.set(2, 0, 1.5);
  weights.set(0, 1, 1.0);
  weights.set(1, 1, 0.7);
  weights.set(2, 1, -1.0);
  weights.setBias(0, 0.8);
  weights.setBias(1, -0.3);

  // Forward pass. The expected values (3.3, -2.6) differ from the raw linear
  // combination of (-2, 1, 3), which is consistent with the hidden layer
  // applying its activation to the received input before firing.
  hidden_layer.receiveInput(hidden_values);
  std::vector<double> transition = weights.fire(hidden_layer);
  output_layer.receiveInput(transition);

  // Compare within epsilon rather than with == (float equality is fragile for
  // accumulated sums; the other assertions in this test already do the same).
  assert(output_layer.getInput(0) > 3.3 - epsilon &&
         output_layer.getInput(0) < 3.3 + epsilon);
  assert(output_layer.getInput(1) > -2.6 - epsilon &&
         output_layer.getInput(1) < -2.6 + epsilon);

  // backpropagation step: target outputs are (1.0, 1.0)
  std::vector<double> actual_outputs;
  actual_outputs.push_back(1.0);
  actual_outputs.push_back(1.0);

  output_layer.computeGradient(actual_outputs);
  weights.computeGradient(hidden_layer, output_layer);
  hidden_layer.computeGradient(weights, output_layer);

  // dE/dh(i) — gradients with respect to the hidden activations.
  assert(hidden_layer.getPartialDerivative(0) < 0.0 + epsilon &&
         hidden_layer.getPartialDerivative(0) > 0.0 - epsilon);
  assert(hidden_layer.getPartialDerivative(1) < -3.56 + epsilon &&
         hidden_layer.getPartialDerivative(1) > -3.56 - epsilon);
  assert(hidden_layer.getPartialDerivative(2) < 3.525 + epsilon &&
         hidden_layer.getPartialDerivative(2) > 3.525 - epsilon);

  printPass("TestHiddenLayer_Backward()");
}
// Checks a single forward pass through a 3-input / 2-output weight matrix:
// the transition vector returned by fire() and the values received by the
// output layer (whose activation here is the identity: output == input).
void TestWeightMatrix_Forward() {
  // Tolerance for floating-point comparisons: the expected values 2.3 and
  // -4.6 come from sums involving 0.8 and 0.3, which are not exactly
  // representable in binary, so exact == comparison is unreliable. The
  // tolerance is tight because no gradient rounding is involved here.
  double epsilon = 1e-9;

  int n_outputs = 2;
  InputLayer input_layer(3);
  OutputLayer output_layer(n_outputs);

  // 3x2 weight matrix plus one bias per output unit.
  WeightMatrix weights(input_layer, output_layer);
  weights.set(0, 0, 0.5);
  weights.set(1, 0, -2.0);
  weights.set(2, 0, 1.5);
  weights.set(0, 1, 1.0);
  weights.set(1, 1, 0.7);
  weights.set(2, 1, -1.0);
  weights.setBias(0, 0.8);
  weights.setBias(1, -0.3);

  std::vector<double> inputs;
  inputs.push_back(-2.0);
  inputs.push_back(1.0);
  inputs.push_back(3.0);
  input_layer.receiveInput(inputs);

  // Forward pass. Expected pre-activations:
  //   out0 = -2*0.5 + 1*(-2.0) + 3*1.5 + 0.8 =  2.3
  //   out1 = -2*1.0 + 1*0.7   + 3*(-1.0) - 0.3 = -4.6
  std::vector<double> transition = weights.fire(input_layer);

  assert(transition.size() == 2);
  assert(transition[0] > 2.3 - epsilon && transition[0] < 2.3 + epsilon);
  assert(transition[1] > -4.6 - epsilon && transition[1] < -4.6 + epsilon);

  output_layer.receiveInput(transition);

  // The output layer stores what it receives...
  assert(output_layer.getInput(0) > 2.3 - epsilon &&
         output_layer.getInput(0) < 2.3 + epsilon);
  assert(output_layer.getInput(1) > -4.6 - epsilon &&
         output_layer.getInput(1) < -4.6 + epsilon);

  // ...and its output equals its input (identity activation).
  assert(output_layer.getOutput(0) > 2.3 - epsilon &&
         output_layer.getOutput(0) < 2.3 + epsilon);
  assert(output_layer.getOutput(1) > -4.6 - epsilon &&
         output_layer.getOutput(1) < -4.6 + epsilon);

  printPass("TestWeightMatrix_Forward()");
}