Code example #1
/* Initialize a neural network based upon the data in a prestored file (see the
    header file of this module for more info) */
NN *nn_load_from_file(FILE *file) {
    ASSERT(file);

    // parse header
    char structure[255];
    memset(structure, '\0', 255);
    if (fscanf(file, "[NN-" NN_FILE_DUMP_VERSION "<%254[0123456789:]>]\r\n", structure) != 1) {
        fprintf(stderr, "Error: invalid version of neural network dump file. Expected version: %s\n", NN_FILE_DUMP_VERSION);
        return NULL;
    }

    unsigned int layer_count = 0;
    for (unsigned int i = 0; i < strlen(structure); i++) {
        if (structure[i] == ':') layer_count++;
    }
    if (layer_count == 0) {
        fprintf(stderr, "Error: could not find any layers in neural network dump file\n");
        return NULL;
    }
    layer_count++;
    unsigned int neuron_count[layer_count];
    char *offset = structure;
    for (unsigned int i = 0; i < layer_count; i++) {
        neuron_count[i] = atoi(offset);
        if (i + 1 < layer_count)
            offset = strchr(offset, ':') + 1;
    }

    // create network
    NN *network = nn_create(layer_count, neuron_count);
    if (network == NULL) {
        return NULL;
    }
    network->layer_count = layer_count;
    unsigned int layer1, layer2, neuron1, neuron2;
    float weight, change;
    Synapse *synapse;
    while (fscanf(file, "%u:%u:%u:%u:%f:%f\r\n", &layer1, &neuron1, &layer2, &neuron2, &weight, &change) == 6) {
        nn_add_synapse(network, layer1, neuron1, layer2, neuron2);
        synapse = network->synapses[network->synapse_count - 1];
        synapse->weight = weight;
        synapse->change = change;
    }

    return network;
}
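
For reference, a writer that emits exactly the format this loader parses might look like the sketch below. The function name nn_save_to_file, the neuron_count field, and the Synapse endpoint fields (layer1, neuron1, ...) are assumptions for illustration; only the file layout and the weight/change fields are taken from the loader above.

/* Hedged sketch of the matching writer (hypothetical name and fields):
 * header "[NN-<version><n0:n1:...>]", then one
 * "layer:neuron:layer:neuron:weight:change" line per synapse. */
int nn_save_to_file(const NN *network, FILE *file) {
    ASSERT(file);
    fprintf(file, "[NN-" NN_FILE_DUMP_VERSION "<");
    for (unsigned int i = 0; i < network->layer_count; i++)
        fprintf(file, i == 0 ? "%u" : ":%u", network->neuron_count[i]); /* assumed field */
    fprintf(file, ">]\r\n");
    for (unsigned int i = 0; i < network->synapse_count; i++) {
        const Synapse *s = network->synapses[i];
        fprintf(file, "%u:%u:%u:%u:%f:%f\r\n",        /* assumed endpoint fields */
                s->layer1, s->neuron1, s->layer2, s->neuron2,
                s->weight, s->change);
    }
    return 0;
}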
Code example #2
File: bots.c Project: kz04px/Bot_Project
int bot_create(s_bot *bot, float x, float y)
{
  ASSERT(bot != NULL);

  bot->x = x;
  bot->y = y;
  bot->age = 0;
  bot->size = 1;
  bot->dead = 0;
  bot->red   = RAND_BETWEEN(0.0, 1.0);
  bot->green = RAND_BETWEEN(0.0, 1.0);
  bot->blue  = RAND_BETWEEN(0.0, 1.0);
  bot->energy = BOT_START_ENERGY * RAND_BETWEEN(0.8, 1.2);
  bot->r = RAND_BETWEEN(0.0, 360.0);
  bot->turn_rate = 3.0;
  // eyes
  bot->num_eyes = 3;
  bot->eyes = (s_eye*)malloc(bot->num_eyes*sizeof(s_eye));
  if(bot->eyes == NULL) return -1;
  bot->eyes[0].position =  15;
  bot->eyes[1].position = 345;
  bot->eyes[2].position = 180;
  bot->eyes[0].view_angle = 30;
  bot->eyes[1].view_angle = 30;
  bot->eyes[2].view_angle = 15;
  bot->eyes[0].view_distance = 2.5;
  bot->eyes[1].view_distance = 2.5;
  bot->eyes[2].view_distance = 2.5;
  // spikes
  bot->num_spikes = 1;
  bot->spikes = (s_spike*)malloc(bot->num_spikes*sizeof(s_spike));
  if(bot->spikes == NULL) {free(bot->eyes); return -1;}
  bot->spikes[0].position = 0;
  bot->spikes[0].length = RAND_BETWEEN(0.0, 0.2);
  // nn
  bot->nn.num_layers = 3;
  bot->nn.layer_sizes = (int*)malloc(bot->nn.num_layers*sizeof(int));
  if(bot->nn.layer_sizes == NULL) {free(bot->eyes); free(bot->spikes); return -1;}
  bot->nn.layer_sizes[0] = 13;
  bot->nn.layer_sizes[1] = 7;
  bot->nn.layer_sizes[2] = 3;

  nn_create(&bot->nn);
  nn_random_weights(&bot->nn, -1.0, 1.0);

  return 0;
}
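
bot_create() makes three heap allocations (the eyes, the spikes, and the layer-size array), so the bot needs a matching cleanup routine to avoid leaks. A minimal sketch, assuming the project pairs nn_create() with some nn_free()-style counterpart (hypothetical name):

/* Cleanup sketch pairing with bot_create().  nn_free() is a hypothetical
 * stand-in for whatever releases the buffers nn_create() allocated. */
void bot_destroy(s_bot *bot)
{
  ASSERT(bot != NULL);
  free(bot->eyes);
  free(bot->spikes);
  free(bot->nn.layer_sizes);
  /* nn_free(&bot->nn); */
}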
Code example #3
File: dwjacob.c Project: cgorman/NODElib
int main(int argc, char **argv)
{
  /* These variables are for command-line options.
   */
  double mag = 1.0, etol = 10e-3, detol = 10e-8, rate = 0.1;
  int seed = 0, minepochs = 10, maxepochs = 100;
  char *afunc = "tanh", *alg = "cgpr", *srch = "cubic";

  /* The OPTION array is used to easily parse command-line options.
   */
  OPTION opts[] = {
    { "-seed",      OPT_INT,    &seed,      "random number seed"           },
    { "-minepochs", OPT_INT,    &minepochs, "minimum # of training steps"  },
    { "-maxepochs", OPT_INT,    &maxepochs, "maximum # of training steps"  },
    { "-afunc",     OPT_STRING, &afunc,     "act. function for hidden node"},
    { "-mag",       OPT_DOUBLE, &mag,       "max size of initial weights"  },
    { "-etol",      OPT_DOUBLE, &etol,      "error tolerance"              },
    { "-detol",     OPT_DOUBLE, &detol,     "delta error tolerance"        },
    { "-rate",      OPT_DOUBLE, &rate,      "learning rate"                },
    { "-alg",       OPT_STRING, &alg,       "training algorithm"           },
    { "-srch",      OPT_STRING, &srch,      "line search"                  },
    { NULL,         OPT_NULL,   NULL,       NULL                           }
  };

  /* The DATASET and the NN that we will use.
   */
  DATASET *data;
  NN *nn;


  /* Get the command-line options.
   */
  get_options(argc, argv, opts, help_string, NULL, 0);

  /* Set the random seed.
   */
  srandom(seed);

  nn = nn_create("4 2 4");   /* 2-2-1 architecture. */
  nn_link(nn, "0 -l-> 1");   /* Inputs to hidden link. */
  nn_link(nn, "1 -l-> 2");   /* Hidden to output link. */

  /* Set the activation functions of the hidden and output layers and
   * initialize the weights to uniform random values between -/+mag.
   */
  nn_set_actfunc(nn, 1, 0, afunc);
  nn_set_actfunc(nn, 2, 0, "logistic");
  nn_init(nn, mag);
 
  /* Convert the C matrix into a DATASET.  There are four inputs, four
   * outputs, and four patterns total.
   */
  data = dataset_create(&dsm_matrix_method,
			dsm_c_matrix(&rawdata[0][0], 4, 4, 4));

  /* Tell the NN how to train itself.
   */
  nn->info.train_set = data;
  nn->info.opt.min_epochs = minepochs;
  nn->info.opt.max_epochs = maxepochs;
  nn->info.opt.error_tol = etol;
  nn->info.opt.delta_error_tol = detol;
  nn->info.opt.hook = training_hook;
  nn->info.opt.rate = rate;

  if(strcmp(srch, "hybrid") == 0)
    nn->info.opt.stepf = opt_lnsrch_hybrid;
  else if(strcmp(srch, "golden") == 0)
    nn->info.opt.stepf = opt_lnsrch_golden;
  else if(strcmp(srch, "cubic") == 0)
    nn->info.opt.stepf = opt_lnsrch_cubic;
  else if(strcmp(srch, "none") == 0)
    nn->info.opt.stepf = NULL;
  
  if(strcmp(alg, "cgpr") == 0)
    nn->info.opt.engine = opt_conjgrad_pr;
  else if(strcmp(alg, "cgfr") == 0)
    nn->info.opt.engine = opt_conjgrad_fr;
  else if(strcmp(alg, "qndfp") == 0)
    nn->info.opt.engine = opt_quasinewton_dfp;
  else if(strcmp(alg, "qnbfgs") == 0)
    nn->info.opt.engine = opt_quasinewton_bfgs;
  else if(strcmp(alg, "lm") == 0)
    nn->info.opt.engine = opt_levenberg_marquardt;
  else if(strcmp(alg, "bp") == 0) {
    nn->info.opt.engine = opt_gradient_descent;
    nn->info.subsample = 1;
    nn->info.opt.stepf = nn_lnsrch_search_then_converge;
    nn->info.opt.momentum = 0.9;
    nn->info.stc_eta_0 = 1;
    nn->info.stc_tau = 100;
  }

  /* Do the training.  This will print out the epoch number and
   * the error level until training halts via one of the stopping
   * criteria.
   */
  nn_train(nn);

  /* Print out each input training pattern and the respective
   * NN output.
   */
  printf("--------------------\n");
  nn_offline_test(nn, data, testing_hook);

#if 1
  { 
    const double dw = 0.000001;
    double jj1, jj2, *Rg, Rin[4], Rdout[4], dedy[4], err;
    int j, k, l, n = nn->numweights;
    Rg = allocate_array(1, sizeof(double), nn->numweights);
    nn->need_all_grads = 1;
    for(k = 0; k < 4; k++) {
      
      nn_forward(nn, &rawdata[k][0]);
      for(l = 0; l < nn->numout; l++)
	dedy[l] = nn->y[l] - rawdata[k][l];
      nn_backward(nn, dedy);
      for(l = 0; l < nn->numout; l++)
	Rin[l] = nn->dx[l] - dedy[l];

      nn_Rforward(nn, Rin, NULL);
      for(l = 0; l < nn->numout; l++)
        Rdout[l] = nn->Ry[l] - nn->Rx[l];

      nn_Rbackward(nn, Rdout);
      nn_get_Rgrads(nn, Rg);

      for(j = 0; j < n; j++) {
	nn_forward(nn, &rawdata[k][0]);
	for(l = 0; l < nn->numout; l++)
	  dedy[l] = nn->y[l] - rawdata[k][l];
	nn_backward(nn, dedy);
	jj1 = 0;
	for(l = 0; l < nn->numout; l++)
	  jj1 += 0.5 * (dedy[l] - nn->dx[l]) * (dedy[l] - nn->dx[l]);

	*nn->weights[j] += dw;
	nn_forward(nn, &rawdata[k][0]);
	for(l = 0; l < nn->numout; l++)
	  dedy[l] = nn->y[l] - rawdata[k][l];
	nn_backward(nn, dedy);
	jj2 = 0;
	for(l = 0; l < nn->numout; l++)
	  jj2 += 0.5 * (dedy[l] - nn->dx[l]) * (dedy[l] - nn->dx[l]);
	err = fabs(Rg[j] - (jj2 - jj1) / dw) / fabs(Rg[j]);
	printf("(%d, %2d) ja = % .5e  jn = % .5e  error = % .2e  %s\n",
	       k, j, Rg[j], (jj2 - jj1) / dw,
	       err, (err > 10e-4) ? "BAD" : "GOOD");
	*nn->weights[j] -= dw;
      }
    }
  }
#endif

  /* Free up everything.
   */
  nn_destroy(nn);
  dsm_destroy_matrix(dataset_destroy(data));
  nn_shutdown();

  /* Bye.
   */
  exit(0); 
}
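
The #if 1 block above is a finite-difference sanity check: for every weight it compares the analytic value Rg[j] against the forward difference (J(w_j + dw) - J(w_j)) / dw of the squared-error objective, flagging relative errors above 10e-4 as BAD. A generic, NODElib-independent version of the same idea might look like this sketch (the objective callback and all names are illustrative assumptions):

#include <math.h>
#include <stdio.h>

/* Forward-difference gradient check: for each parameter w[j], compare the
 * analytic gradient g[j] against (f(w + dw*e_j) - f(w)) / dw.  Returns the
 * number of entries whose relative error exceeds tol. */
int check_gradient(double (*f)(const double *w, void *ctx), void *ctx,
                   double *w, const double *g, int n, double dw, double tol)
{
  int j, bad = 0;
  double base = f(w, ctx);
  for (j = 0; j < n; j++) {
    double saved = w[j], fd, err;
    w[j] = saved + dw;
    fd = (f(w, ctx) - base) / dw;
    w[j] = saved;
    err = fabs(g[j] - fd) / (fabs(g[j]) + 1e-12);
    if (err > tol) {
      printf("weight %d: analytic % .5e vs numeric % .5e\n", j, g[j], fd);
      bad++;
    }
  }
  return bad;
}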
Code example #4
File: hopfield.c Project: cgorman/NODElib
NN *create_hopfield_net(double gain, double tau, double dt)
{
  NN *nn;
  int sz, n, i, j, k, l, ii, jj;
  char str[80];

  /* How many neurons?  What is the N x N size? */
  sz = sizeof(task_assigment_scores) / sizeof(double);
  n = sqrt(sz);

  /* Make a two-layer net.  The first layer will be the U terms, while
   * the second layer holds the V terms.
   */
  sprintf(str, "%d %d", sz, sz);
  nn = nn_create(str);
  nn_set_actfunc(nn, 0, 0, "none");
  nn_set_actfunc(nn, 1, 0, "sigmoid");

  /* This just makes the logistic activation function behave as
   * specified in the main documentation above.
   */
#if 0
  nn->layers[1].slabs[0].aux[0] = gain;
#endif

  /* The first connection is for the T terms.  The second is for the
   * -dt * U / tau terms, and the third is just to pass V = g(U).
   */
  nn_link(nn, "1 -l-> 0");
  nn_link(nn, "0 -s-> 0");
  nn_link(nn, "0 -c-> 1");

  /* This is just a little hack to set up the T connections.  By Page
   * and Tagliarini's K-out-of-N rule, for any pair of neurons that
   * reside in the same column or row we want T[i,j] = -2.  Otherwise,
   * it would be 0.
   *
   * The i and k indices move over rows, the j and l indices move over
   * columns, and ii and jj index the neurons as they appear in the NN.
   */

  /* For every neuron in the N x N grid...  */
  for(i = 0, ii = 0; i < n; i++)
    for(j = 0; j < n; j++, ii++)
     
      /* For every other neuron in the N x N grid... */
      for(k = 0, jj = 0; k < n; k++)
	for(l = 0; l < n; l++, jj++)
	 
	  /* Are they in the same column or row?  If so, then make the
	   * weight a -2, but multiply it by dt as well.  Otherwise,
	   * the weight should be zero.
	   */
	  if((i == k && j != l) || (i != k && j == l))
	    nn->links[0]->u[ii][jj] = -2.0 * dt;
	  else
	    nn->links[0]->u[ii][jj] = 0.0;

  /* We don't need these, so zero them out. */
  for(i = 0; i < sz; i++)
    nn->links[0]->a[i] = 0;

  /* These next connections are for the U - dt * U / tau terms. */
  for(i = 0; i < sz; i++)
    nn->links[1]->a[i] = 1 - dt / tau;

  /* Finally, give our V terms a random initial state. */
  for(i = 0; i < sz; i++)
    nn->layers[1].y[i] = random_range(0.3, 0.7);

  return(nn);
}
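
The three links above implement one Euler step of the continuous Hopfield dynamics du/dt = -u/tau + T*v + b, i.e. U <- (1 - dt/tau)*U + dt*(T*V + b) followed by V = g(U); note that create_hopfield_net() folds the dt factor of the T term directly into the weights (-2.0 * dt). A standalone sketch of that update, outside NODElib and with all names assumed:

#include <math.h>

/* One Euler step of the Hopfield dynamics wired up above:
 * u[i] <- (1 - dt/tau)*u[i] + dt*(sum_j T[i][j]*v[j] + b[i]), v[i] = g(u[i]).
 * Here T carries the raw -2/0 values; the dt factor is applied explicitly
 * instead of being folded into the weights as in create_hopfield_net(). */
void hopfield_step(int sz, double dt, double tau, double gain,
                   const double *T, const double *b, double *u, double *v)
{
  int i, j;
  for (i = 0; i < sz; i++) {
    double net = b[i];
    for (j = 0; j < sz; j++)
      net += T[i * sz + j] * v[j];
    u[i] = (1.0 - dt / tau) * u[i] + dt * net;
  }
  for (i = 0; i < sz; i++)
    v[i] = 1.0 / (1.0 + exp(-gain * u[i]));   /* sigmoid activation */
}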
Code example #5
File: memleak.c Project: cgorman/NODElib
int main(int argc, char **argv)
{
  /* These variables are for command-line options.
   */
  double mag = 0.1, etol = 10e-3, detol = 10e-8;
  int seed = 0, minepochs = 10, maxepochs = 100;
  char *afunc = "tanh";

  /* The OPTION array is used to easily parse command-line options.
   */
  OPTION opts[] = {
    { "-seed",      OPT_INT,    &seed,      "random number seed"           },
    { "-minepochs", OPT_INT,    &minepochs, "minimum # of training steps"  },
    { "-maxepochs", OPT_INT,    &maxepochs, "maximum # of training steps"  },
    { "-afunc",     OPT_STRING, &afunc,     "act. function for hidden node"},
    { "-mag",       OPT_DOUBLE, &mag,       "max size of initial weights"  },
    { "-etol",      OPT_DOUBLE, &etol,      "error tolerance"              },
    { "-detol",     OPT_DOUBLE, &detol,     "delta error tolerance"        },
    { NULL,         OPT_NULL,   NULL,       NULL                           }
  };

  /* The DATASET and the NN that we will use.
   */
  DATASET *data;
  NN *nn;

  /* Set it so that xalloc_report() will print to the screen.
   */
  ulog_threshold = ULOG_DEBUG;
  
  /* Get the command-line options.
   */
  get_options(argc, argv, opts, "Train a NN on XOR data.\n");

  /* Set the random seed.
   */
  srandom(seed);

  /* Create the neural network.  This one has two inputs, one hidden node,
   * and a single output.  The inputs are connected to the hidden node
   * and the output, while the hidden node is just connected to the
   * output.
   */
  nn = nn_create("2 1 1");   /* 2-1-1 architecture. */
  nn_link(nn, "0 -l-> 1");   /* Inputs to hidden link. */
  nn_link(nn, "1 -l-> 2");   /* Hidden to output link. */
  nn_link(nn, "0 -l-> 2");   /* Input to output short-circuit link. */  

  /* Set the activation functions of the hidden and output layers and
   * initialize the weights to uniform random values between -/+mag.
   */
  nn_set_actfunc(nn, 1, 0, afunc);
  nn_set_actfunc(nn, 2, 0, "logistic");
  nn_init(nn, mag);
 
  /* Convert the C matrix into a DATASET.  There are two inputs, one
   * output, and four patterns total.
   */
  data = dataset_create(&dsm_matrix_method,
			dsm_c_matrix(&xor_data[0][0], 2, 1, 4));

  /* Tell the NN how to train itself.
   */
  nn->info.train_set = data;
  nn->info.opt.min_epochs = minepochs;
  nn->info.opt.max_epochs = maxepochs;
  nn->info.opt.error_tol = etol;
  nn->info.opt.delta_error_tol = detol;

  nn_train(nn);
  nn_offline_test(nn, data, NULL);

  nn_write(nn, "xor.net");
  nn_destroy(nn);
  nn = nn_read("xor.net");
  nn_destroy(nn);
  unlink("xor.net");

  dsm_destroy_matrix(dataset_destroy(data));
  nn_shutdown();

  xalloc_report();

  /* Bye.
   */
  exit(0); 
}
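
The call dsm_c_matrix(&xor_data[0][0], 2, 1, 4) implies a four-row matrix whose columns are two inputs followed by one target, so xor_data presumably looks like the sketch below (the actual definition lives elsewhere in this example; this reconstruction is an assumption):

/* Hypothetical layout consistent with dsm_c_matrix(&xor_data[0][0], 2, 1, 4):
 * columns are input1, input2, target. */
static double xor_data[4][3] = {
  { 0.0, 0.0, 0.0 },
  { 0.0, 1.0, 1.0 },
  { 1.0, 0.0, 1.0 },
  { 1.0, 1.0, 0.0 },
};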
Code example #6
File: cnls.c Project: cgorman/NODElib
int main(int argc, char **argv)
{
  /* These variables are for command-line options. */
  double noise = 0.0;
  int seed = 0, nbasis = 4, points = 100;

  /* The OPTION array is used to easily parse command-line options. */
  OPTION opts[] = {
    { "-noise",  OPT_DOUBLE, &noise,  "variance of Gaussian noise"   },
    { "-seed",   OPT_INT,    &seed,   "random number seed"           },
    { "-nbasis", OPT_INT,    &nbasis, "number of basis functions"    },
    { "-points", OPT_INT,    &points, "number of data points"        },
    { NULL,      OPT_NULL,   NULL,    NULL                           }
  };

  /* The DATASET and the NN that we will use. */
  DATASET *data;
  NN *nn;

  /* Get the command-line options.  */
  get_options(argc, argv, opts, help_string, NULL, 0);
  srandom(seed);

  /* Make the data, and build a CNLS net. */
  data = make_data(points, noise);
  nn = nn_create("2 (%d %d) %d 1", nbasis, nbasis, nbasis);
  nn_set_actfunc(nn, 1, 0, "linear");
  nn_set_actfunc(nn, 1, 1, "exp(-x)");
  nn_set_actfunc(nn, 2, 0, "linear");
  nn_set_actfunc(nn, 3, 0, "linear");

  nn_link(nn, "0 -l-> (1 0)");
  nn_link(nn, "0 -e-> (1 1)");
  nn_link(nn, "(1 1) -l-> 3");
  nn_link(nn, "(1 0) (1 1) -p-> 2");
  nn_link(nn, "2 -l-> 3");

  nn_init(nn, 1);

  nn->info.train_set = data;
  nn->info.opt.min_epochs = 10;
  nn->info.opt.max_epochs = 100;
  nn->info.opt.error_tol = 1e-5;
  nn->info.opt.delta_error_tol = 1e-7;
  nn->info.opt.hook = training_hook;
  nn_train(nn);

  /* Now, let's see how well the NN performs.
   */
  nn_offline_test(nn, data, testing_hook);

  /* Free up everything.
   */
  nn_destroy(nn);
  series_destroy(dataset_destroy(data));
  nn_shutdown();

  /* Bye.
   */
  exit(0); 
}
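
Reading the links back: slab (1 0) computes linear terms of the input, slab (1 1) computes basis activations through the exp(-x) activation on a Euclidean (-e->) connection, layer 2 forms their elementwise product (-p->), and layer 3 linearly combines the product path with the direct basis path. Under those assumptions, the forward pass is roughly the following standalone sketch (all names and the exact basis form are guesses, not NODElib code):

#include <math.h>

/* Standalone sketch of the CNLS forward pass wired up above (assumed):
 *   a_i(x)   = abias[i] + A[i].x     -- linear slab (1 0)
 *   phi_i(x) = exp(-|x - C[i]|^2)    -- basis slab (1 1) via "0 -e-> (1 1)"
 *   y = sum_i W[i]*a_i*phi_i         -- product path through layer 2
 *     + sum_i V[i]*phi_i             -- direct path "(1 1) -l-> 3"
 * Output biases are omitted for brevity. */
double cnls_forward(int nbasis, const double x[2],
                    const double A[][2], const double abias[],
                    const double C[][2], const double W[], const double V[])
{
  double y = 0.0;
  int i;
  for (i = 0; i < nbasis; i++) {
    double a = abias[i] + A[i][0]*x[0] + A[i][1]*x[1];
    double d0 = x[0] - C[i][0], d1 = x[1] - C[i][1];
    double phi = exp(-(d0*d0 + d1*d1));
    y += W[i] * (a * phi) + V[i] * phi;
  }
  return y;
}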
Code example #7
File: xor.c Project: cgorman/NODElib
int main(int argc, char **argv)
{
  /* These variables are for command-line options.
   */
  double mag = 1.0, etol = 10e-3, detol = 10e-8;
  double rate = 0.1, moment = 0.9, subsamp = 0, decay = 0.9;
  int seed = 0, minepochs = 10, maxepochs = 100;
  char *afunc = "tanh";
  void *linealg = opt_lnsrch_golden, *optalg = opt_conjgrad_pr;

  OPTION_SET_MEMBER optsetm[] = {
    { "cgpr",   opt_conjgrad_pr },
    { "cgfr",   opt_conjgrad_fr },
    { "qndfp",  opt_quasinewton_dfp },
    { "qnbfgs", opt_quasinewton_bfgs },
    { "lm",     opt_levenberg_marquardt },
    { "gd",     opt_gradient_descent },
    { NULL,     NULL }
  };

  OPTION_SET_MEMBER linesetm[] = {
    { "golden", opt_lnsrch_golden },
    { "hybrid", opt_lnsrch_hybrid },
    { "cubic",  opt_lnsrch_cubic },
    { "stc",    nn_lnsrch_search_then_converge },
    { "none",   NULL },
    { NULL,     NULL }
  };

  OPTION_SET lineset = { &linealg, linesetm };
  OPTION_SET optset = { &optalg, optsetm };
    
  /* The OPTION array is used to easily parse command-line options.
   */
  OPTION opts[] = {
    { "-seed",      OPT_INT,    &seed,      "random number seed"           },
    { "-minepochs", OPT_INT,    &minepochs, "minimum # of training steps"  },
    { "-maxepochs", OPT_INT,    &maxepochs, "maximum # of training steps"  },
    { "-afunc",     OPT_STRING, &afunc,     "act. function for hidden node"},
    { "-mag",       OPT_DOUBLE, &mag,       "max size of initial weights"  },
    { "-etol",      OPT_DOUBLE, &etol,      "error tolerance"              },
    { "-detol",     OPT_DOUBLE, &detol,     "delta error tolerance"        },
    { "-rate",      OPT_DOUBLE, &rate,      "learning rate"                },
    { "-moment",    OPT_DOUBLE, &moment,    "momentum rate"                },
    { "-alg",       OPT_SET,    &optset,    "training algorithm"           },
    { "-subsamp",   OPT_DOUBLE, &subsamp,   "subsample value"  },
    { "-decay",     OPT_DOUBLE, &decay,     "stochastic decay"  },
    { "-srch",      OPT_SET,    &lineset,   "line search" },
    { NULL,         OPT_NULL,   NULL,       NULL                           }
  };

  /* The DATASET and the NN that we will use.
   */
  DATASET *data;
  NN *nn;

  /* Get the command-line options.
   */
  get_options(argc, argv, opts, help_string, NULL, 0);

  /* Set the random seed.
   */
  srandom(seed);

  /* Create the neural network.  This one has two inputs, one hidden node,
   * and a single output.  The inputs are connected to the hidden node
   * and the output, while the hidden node is just connected to the
   * output.
   */
  nn = nn_create("2 1 1");   /* 2-1-1 architecture. */
  nn_link(nn, "0 -l-> 1");   /* Inputs to hidden link. */
  nn_link(nn, "1 -l-> 2");   /* Hidden to output link. */
  nn_link(nn, "0 -l-> 2");   /* Input to output short-circuit link. */  

  /* Set the activation functions of the hidden and output layers and
   * initialize the weights to uniform random values between -/+mag.
   */
  nn_set_actfunc(nn, 1, 0, afunc);
  nn_set_actfunc(nn, 2, 0, "logistic");
  nn_init(nn, mag);
 
  /* Convert the C matrix into a DATASET.  There are two inputs, one
   * output, and four patterns total.
   */
  data = dataset_create(&dsm_matrix_method,
			dsm_c_matrix(&xor_data[0][0], 2, 1, 4));

  /* Tell the NN how to train itself.
   */
  nn->info.train_set = data;
  nn->info.opt.min_epochs = minepochs;
  nn->info.opt.max_epochs = maxepochs;
  nn->info.opt.error_tol = etol;
  nn->info.opt.delta_error_tol = detol;
  nn->info.opt.hook = training_hook;
  nn->info.opt.rate = rate;
  nn->info.opt.momentum = moment;
  nn->info.opt.decay = decay;
  nn->info.subsample = subsamp;
  if(subsamp != 0)
    nn->info.opt.stochastic = 1;
  nn->info.opt.stepf = linealg;
  nn->info.opt.engine = optalg;
  nn->info.stc_eta_0 = 1;
  nn->info.stc_tau = 100;


  /* Do the training.  This will print out the epoch number and
   * the error level until training halts via one of the stopping
   * criteria.
   */
  nn_train(nn);
  nn->info.subsample = 0;

  /* Print out each input training pattern and the respective
   * NN output.
   */
  printf("--------------------\n");
  nn_offline_test(nn, data, testing_hook);

  /* Free up everything.
   */
  nn_destroy(nn);
  dsm_destroy_matrix(dataset_destroy(data));
  nn_shutdown();

  /* Bye.
   */
  exit(0); 
}
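
The stc_eta_0 and stc_tau fields configure the "search then converge" step function selectable with -srch stc. A common form of that schedule (Darken and Moody's), assumed here rather than read from NODElib's source, holds the rate near eta_0 while t << tau and decays like 1/t afterwards:

/* Assumed search-then-converge learning-rate schedule:
 * roughly constant (eta_0) while t << tau, decaying like eta_0*tau/t after. */
double stc_rate(double eta_0, double tau, double t)
{
  return eta_0 / (1.0 + t / tau);
}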
Code example #8
File: tjacob.c Project: cgorman/NODElib
int main(int argc, char **argv)
{
  /* These variables are for command-line options. */
  double var = 0.25, *x, **J, err;
  int seed = 0, nbasis = 12, points = 200, i;

  /* The OPTION array is used to easily parse command-line options. */
  OPTION opts[] = {
    { "-var",    OPT_DOUBLE, &var,    "variance of basis functions"  },
    { "-seed",   OPT_INT,    &seed,   "random number seed"           },
    { "-nbasis", OPT_INT,    &nbasis, "number of basis functions"    },
    { "-points", OPT_INT,    &points, "number of data points"        },
    { NULL,      OPT_NULL,   NULL,    NULL                           }
  };

  /* The DATASET and the NN that we will use. */
  DATASET *data;
  NN *nn;

  /* Get the command-line options. */
  get_options(argc, argv, opts, help_string, NULL, 0);

  srandom(seed);

  /* Make the data, and build an rbf from it. */
  data = make_data(points);

  nn = nn_create("2 2");
  nn_link(nn, "0 -q-> 1");
  nn_set_actfunc(nn, 1, 0, "linear");

  nn->info.train_set = data;
  nn->info.opt.min_epochs = 10;
  nn->info.opt.max_epochs = 25;
  nn->info.opt.error_tol = 10e-5;
  nn->info.opt.delta_error_tol = 10e-6;
  nn->info.opt.hook = training_hook;
  nn->info.opt.engine = opt_quasinewton_bfgs;
  nn_train(nn);

  J = allocate_array(2, sizeof(double), 2, 2);
  
  /* Now test to see if nn_jacobian() works. */
  for(i = 0; i < points; i++) {
    x = dataset_x(data, i);
    nn_jacobian(nn, x, &J[0][0]);

#if 0
    printf("% 2.2f\t% 2.2f\t% 2.2f\t% 2.2f\n", nn->x[0], nn->x[1],
	   nn->y[0], nn->y[1]);
    printf("% 2.2f\t% 2.2f\t% 2.2f\t% 2.2f\n", J[0][0], J[0][1],
	   J[1][0], J[1][1]);
    printf("% 2.2f\t% 2.2f\t% 2.2f\t% 2.2f\n", df1dx1(x[0], x[1]),
	   df1dx2(x[0], x[1]), df2dx1(x[0], x[1]), df2dx2(x[0], x[1]));
    printf("--\n");
#endif
#if 1
    err = J[0][0] - df1dx1(x[0], x[1]);
    err = err * err;
    printf("% 2.2f\t", err);

    err = J[0][1] - df1dx2(x[0], x[1]);
    err = err * err;
    printf("% 2.2f\t", err);

    err = J[1][0] - df2dx1(x[0], x[1]);
    err = err * err;
    printf("% 2.2f\t", err);

    err = J[1][1] - df2dx2(x[0], x[1]);
    err = err * err;
    printf("% 2.2f\n", err);
#endif
  }

  /* Free up everything. */
  deallocate_array(J);
  nn_destroy(nn);
  series_destroy(dataset_destroy(data));
  nn_shutdown();

  exit(0); 
}
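
nn_jacobian() returns the analytic Jacobian dy/dx that the loop above squares off against the hand-derived dfNdxM() functions. A central-difference estimate allows the same comparison without analytic derivatives; a minimal sketch (the forward-pass callback and all names are illustrative assumptions):

/* Central-difference Jacobian estimate:
 * J[i][j] ~ (y_i(x + h*e_j) - y_i(x - h*e_j)) / (2h).
 * The callback f is an assumed stand-in for running the network. */
void numeric_jacobian(void (*f)(const double *x, double *y, void *ctx),
                      void *ctx, const double *x, double *J,
                      int nin, int nout, double h)
{
  int i, j, k;
  double xp[64], y1[64], y2[64];   /* assumes nin, nout <= 64 */
  for (j = 0; j < nin; j++) {
    for (k = 0; k < nin; k++) xp[k] = x[k];
    xp[j] = x[j] + h;  f(xp, y1, ctx);
    xp[j] = x[j] - h;  f(xp, y2, ctx);
    for (i = 0; i < nout; i++)
      J[i * nin + j] = (y1[i] - y2[i]) / (2.0 * h);
  }
}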