Example #1
dataset *load_data(char *file, fit_conf *conf) {
	dataset *res;
	FILE *fp = fopen(file, "r");
	if (!fp)
		return NULL;
	char ch;
	int sets = 1, len = 1, k, l;

	/* First pass: count columns (sets) from the first line and rows (len)
	 * from the newlines.  Loop on the fscanf return value; testing feof()
	 * before reading would process the final character twice.
	 */
	while (fscanf(fp, "%c", &ch) == 1) {
		if (ch == '\n')
			++len;
		else if ((ch == ' ' || ch == '\t') && len == 1)
			++sets;
	}
	fseek(fp, 0, SEEK_SET);

	res = dataset_create(sets, len);
	if (!res) {
		fclose(fp);
		return NULL;
	}

	/* Second pass: read the values, one row per line, one column per set. */
	for (k = 0; k < len; ++k) {
		for (l = 0; l < sets; ++l) {
			fscanf(fp, "%lf", &(res->data[l][k]));
		}
	}
	fclose(fp);
	if (conf->len == 0)
		conf->len = len;
	res->len = len;
	res->x = conf->x;
	res->y = conf->y;
	res->e_x = conf->e_x;
	res->e_y = conf->e_y;
	return res;
}
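A minimal caller sketch, assuming the project's dataset and fit_conf types; the file name and the zero-initialized configuration are illustrative only:

	fit_conf conf = { 0 };  /* hypothetical: zeroed, so load_data fills in conf.len */
	dataset *d = load_data("data.txt", &conf);
	if (!d)
		fprintf(stderr, "could not read data.txt\n");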
Example #2
DATASET *make_data(int points, double noise)
{
  int i;
  double x, y;
  SERIES *ser;
  DATASET *data;

  ser = series_create();
  ser->y_width = ser->x_delta = ser->y_delta = ser->offset = 1;
  ser->x_width = 2; ser->step = 3;

  /* Fill the SERIES with 'points' patterns, each consisting of two
   * inputs (x, y) and a single target, sin(5 * x * y) + y.  Note that
   * the 'noise' parameter is currently unused.
   */
  for(i = 0; i < points; i++) {
    x = random_range(-1, 1);
    y = random_range(-1, 1);
    series_append_val(ser, x);
    series_append_val(ser, y);
    series_append_val(ser, sin(5 * x * y) + y);
  }

  data = dataset_create(&dsm_series_method, ser);
  return(data);
}
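A short usage sketch, assuming the dataset accessors that appear in the other examples here (dataset_size, dataset_x, dataset_y); the pattern count is arbitrary:

  DATASET *data = make_data(100, 0.0);
  unsigned i, n = dataset_size(data);
  for (i = 0; i < n; i++) {
    double *x = dataset_x(data, i);  /* two inputs per pattern */
    double *y = dataset_y(data, i);  /* one target per pattern */
    printf("% .4f % .4f -> % .4f\n", x[0], x[1], y[0]);
  }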
Example #3
int main(int argc, char **argv) {

	problem_t problem = problem_create(201, 270);

	material_t ice = {
		.alpha = 1.1965e-6,
		.rho = 917.3,
		.L = 334000.0,
		.kappa = 2.246
	};

	material_t snow = {
		.alpha = 6.6671e-7,
		.rho = 584.0,
		.L = 334000.0,
		.kappa = 0.8048
	};

	problem.borders[0].position = 0.0;
	problem.borders[1].position = 2.5;
	problem.borders[2].position = 199.5;

	problem.materials[0] = ice;
	problem.materials[1] = snow;
	problem.beta = 0.3;

	// Read and create the dataset, falling back to fake values if the file is missing.
	FILE *datafile = fopen("data.csv", "r");
	if (datafile == NULL) {
		error_warning("Could not open input file. Using fake values.");
		problem.dataset = dataset_create(4);
	} else {
		problem.dataset = dataset_read(datafile, 4);
		fclose(datafile);
	}

	problem_print_header(&problem);
	problem_iterate(&problem, (unsigned)(24 * 8.64e7)); // 24 days
	problem_destroy(&problem);

	return EXIT_SUCCESS;
}
Example #4
DATASET *create_short_dataset(char *fname, int xdim, int ydim)
{
  DSSHORT *dsshort;
  DATASET *ds;
  FILE *fp;
  struct stat fpstat;
  unsigned bytes;

  if(stat(fname, &fpstat)) {
    perror("could not stat file");
    exit(1);
  }
  bytes = fpstat.st_size;
  if (bytes % ((xdim + ydim) * sizeof(short)) != 0) {
    fprintf(stderr, "file size not a multiple of "
	    "((xdim + ydim) * sizeof(short))\n");
    exit(1);
  }

  dsshort = xmalloc(sizeof(DSSHORT));
  dsshort->data = xmalloc(bytes);
  dsshort->sz = bytes / ((xdim + ydim) * sizeof(short));
  dsshort->xsz = xdim;
  dsshort->ysz = ydim;
  dsshort->whichx = dsshort->whichy = 0;
  dsshort->xbuf1 = xmalloc(sizeof(double) * xdim);
  dsshort->xbuf2 = xmalloc(sizeof(double) * xdim);
  dsshort->ybuf1 = xmalloc(sizeof(double) * ydim);
  dsshort->ybuf2 = xmalloc(sizeof(double) * ydim);

  if ((fp = fopen(fname, "rb")) == NULL) {  /* "b": the file holds raw binary shorts */
    fprintf(stderr, "could not open '%s' for reading\n", fname);
    exit(1);
  }
  if ((fread(dsshort->data, sizeof(char), bytes, fp)) != bytes) {
    fprintf(stderr, "problems reading data from '%s'\n", fname);
    exit(1);
  }
  fclose(fp);

  ds = dataset_create(&dsshort_method, dsshort);
  return ds;
}
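A minimal sketch of producing a compatible input file, assuming the layout implied above and by the -dtype help text in the SVM driver below (xdim input shorts followed by ydim target shorts per pattern, with short_val / 1000.0 = dbl_val); names and values are hypothetical:

  /* One pattern with xdim = 2 inputs and ydim = 1 target. */
  short pat[3] = { 500, -250, 1000 };  /* 0.5, -0.25, 1.0 after / 1000.0 */
  FILE *out = fopen("patterns.short", "wb");
  if (out) {
    fwrite(pat, sizeof(short), 3, out);
    fclose(out);
  }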
Example #5
DATASET *make_data(int points)
{
  int i;
  double x1, x2;
  SERIES *ser;
  DATASET *data;

  ser = series_create();
  /* Two inputs and two targets per pattern, packed consecutively. */
  ser->x_width = ser->y_width = 2;
  ser->x_delta = ser->y_delta = ser->offset = 1;
  ser->step = 4;

  for(i = 0; i < points; i++) {
    x1 = random_range(-1, 1);
    x2 = random_range(-1, 1);
    series_append_val(ser, x1);
    series_append_val(ser, x2);
    series_append_val(ser, f1(x1, x2));
    series_append_val(ser, f2(x1, x2));
  }

  data = dataset_create(&dsm_series_method, ser);
  return(data);
}
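make_data assumes two scalar target functions, f1 and f2, defined elsewhere in the program; a hypothetical pair, purely for illustration:

  /* Hypothetical stand-ins; the real f1/f2 live elsewhere. */
  static double f1(double x1, double x2) { return sin(5.0 * x1 * x2); }
  static double f2(double x1, double x2) { return x1 * x1 - x2; }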
Example #6
int main(int argc, char **argv)
{
  /* These variables are for command-line options.
   */
  double mag = 1.0, etol = 10e-3, detol = 10e-8, rate = 0.1;
  int seed = 0, minepochs = 10, maxepochs = 100;
  char *afunc = "tanh", *alg = "cgpr", *srch = "cubic";

  /* The OPTION array is used to easily parse command-line options.
   */
  OPTION opts[] = {
    { "-seed",      OPT_INT,    &seed,      "random number seed"           },
    { "-minepochs", OPT_INT,    &minepochs, "minimum # of training steps"  },
    { "-maxepochs", OPT_INT,    &maxepochs, "maximum # of training steps"  },
    { "-afunc",     OPT_STRING, &afunc,     "act. function for hidden node"},
    { "-mag",       OPT_DOUBLE, &mag,       "max size of initial weights"  },
    { "-etol",      OPT_DOUBLE, &etol,      "error tolerance"              },
    { "-detol",     OPT_DOUBLE, &detol,     "delta error tolerance"        },
    { "-rate",      OPT_DOUBLE, &rate,      "learning rate"                },
    { "-alg",       OPT_STRING, &alg,       "training algorithm"           },
    { "-srch",      OPT_STRING, &srch,      "line search"                  },
    { NULL,         OPT_NULL,   NULL,       NULL                           }
  };

  /* The DATASET and the NN that we will use.
   */
  DATASET *data;
  NN *nn;


  /* Get the command-line options.
   */
  get_options(argc, argv, opts, help_string, NULL, 0);

  /* Set the random seed.
   */
  srandom(seed);

  nn = nn_create("4 2 4");   /* 2-2-1 architecture. */
  nn_link(nn, "0 -l-> 1");   /* Inputs to hidden link. */
  nn_link(nn, "1 -l-> 2");   /* Hidden to output link. */

  /* Set the Activation functions of the hidden and output layers and
   * initialize the weights to uniform random values between -/+mag.
   */
  nn_set_actfunc(nn, 1, 0, afunc);
  nn_set_actfunc(nn, 2, 0, "logistic");
  nn_init(nn, mag);
 
  /* Convert the C matrix into a DATASET.  There are four inputs, four
   * outputs, and four patterns total.
   */
  data = dataset_create(&dsm_matrix_method,
			dsm_c_matrix(&rawdata[0][0], 4, 4, 4));

  /* Tell the NN how to train itself.
   */
  nn->info.train_set = data;
  nn->info.opt.min_epochs = minepochs;
  nn->info.opt.max_epochs = maxepochs;
  nn->info.opt.error_tol = etol;
  nn->info.opt.delta_error_tol = detol;
  nn->info.opt.hook = training_hook;
  nn->info.opt.rate = rate;

  if(strcmp(srch, "hybrid") == 0)
    nn->info.opt.stepf = opt_lnsrch_hybrid;
  else if(strcmp(srch, "golden") == 0)
    nn->info.opt.stepf = opt_lnsrch_golden;
  else if(strcmp(srch, "cubic") == 0)
    nn->info.opt.stepf = opt_lnsrch_cubic;
  else if(strcmp(srch, "none") == 0)
    nn->info.opt.stepf = NULL;
  
  if(strcmp(alg, "cgpr") == 0)
    nn->info.opt.engine = opt_conjgrad_pr;
  else if(strcmp(alg, "cgfr") == 0)
    nn->info.opt.engine = opt_conjgrad_fr;
  else if(strcmp(alg, "qndfp") == 0)
    nn->info.opt.engine = opt_quasinewton_dfp;
  else if(strcmp(alg, "qnbfgs") == 0)
    nn->info.opt.engine = opt_quasinewton_bfgs;
  else if(strcmp(alg, "lm") == 0)
    nn->info.opt.engine = opt_levenberg_marquardt;
  else if(strcmp(alg, "bp") == 0) {
    nn->info.opt.engine = opt_gradient_descent;
    nn->info.subsample = 1;
    nn->info.opt.stepf = nn_lnsrch_search_then_converge;
    nn->info.opt.momentum = 0.9;
    nn->info.stc_eta_0 = 1;
    nn->info.stc_tau = 100;
  }

  /* Do the training.  This will print out the epoch number and
   * the error level until training halts via one of the stopping
   * criteria.
   */
  nn_train(nn);

  /* Print out each input training pattern and the respective
   * NN output.
   */
  printf("--------------------\n");
  nn_offline_test(nn, data, testing_hook);

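  /* Sanity check on the analytic R-gradients: for each weight w[j], the
   * block below compares Rg[j] against the one-sided finite difference
   * (J(w[j] + dw) - J(w[j])) / dw, where J is half the squared norm of
   * (dedy - dx).  The relative error should be tiny; anything above
   * 10e-4 is flagged as BAD.
   */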
#if 1
  { 
    const double dw = 0.000001;
    double jj1, jj2, *Rg, Rin[4], Rdout[4], dedy[4], err;
    int j, k, l, n = nn->numweights;
    Rg = allocate_array(1, sizeof(double), nn->numweights);
    nn->need_all_grads = 1;
    for(k = 0; k < 4; k++) {
      
      nn_forward(nn, &rawdata[k][0]);
      for(l = 0; l < nn->numout; l++)
	dedy[l] = nn->y[l] - rawdata[k][l];
      nn_backward(nn, dedy);
      for(l = 0; l < nn->numout; l++)
	Rin[l] = nn->dx[l] - dedy[l];

      nn_Rforward(nn, Rin, NULL);
      for(l = 0; l < nn->numout; l++)
	Rdout[l] = nn->Ry[l] - nn->Rx[l];

      nn_Rbackward(nn, Rdout);
      nn_get_Rgrads(nn, Rg);

      for(j = 0; j < n; j++) {
	nn_forward(nn, &rawdata[k][0]);
	for(l = 0; l < nn->numout; l++)
	  dedy[l] = nn->y[l] - rawdata[k][l];
	nn_backward(nn, dedy);
	jj1 = 0;
	for(l = 0; l < nn->numout; l++)
	  jj1 += 0.5 * (dedy[l] - nn->dx[l]) * (dedy[l] - nn->dx[l]);

	*nn->weights[j] += dw;
	nn_forward(nn, &rawdata[k][0]);
	for(l = 0; l < nn->numout; l++)
	  dedy[l] = nn->y[l] - rawdata[k][l];
	nn_backward(nn, dedy);
	jj2 = 0;
	for(l = 0; l < nn->numout; l++)
	  jj2 += 0.5 * (dedy[l] - nn->dx[l]) * (dedy[l] - nn->dx[l]);
	err = fabs(Rg[j] - (jj2 - jj1) / dw) / fabs(Rg[j]);
	printf("(%d, %2d) ja = % .5e  jn = % .5e  error = % .2e  %s\n",
	       k, j, Rg[j], (jj2 - jj1) / dw,
	       err, (err > 10e-4) ? "BAD" : "GOOD");
	*nn->weights[j] -= dw;
      }
    }
  }
#endif

  /* Free up everything.
   */
  nn_destroy(nn);
  dsm_destroy_matrix(dataset_destroy(data));
  nn_shutdown();

  /* Bye.
   */
  exit(0); 
}
Example #7
int main(int argc, char **argv)
{
  /* These variables are for command-line options.
   */
  double var = 0.0;
  int seed = 0, nbasis = 4, norm = 0;
  
  /* The OPTION array is used to easily parse command-line options.
   */
  OPTION opts[] = {
    { "-var",    OPT_DOUBLE, &var,    "variance of basis functions"  },
    { "-seed",   OPT_INT,    &seed,   "random number seed"           },
    { "-nbasis", OPT_INT,    &nbasis, "number of basis functions"    },
    { "-norm",   OPT_SWITCH, &norm,   "normalized basis functions?"  },
    { NULL,      OPT_NULL,   NULL,    NULL                           }
  };

  /* The DATASET and the NN that we will use.
   */
  SERIES *trainser, *testser;
  DATASET *trainds, *testds;
  NN *nn;


  /* Get the command-line options.
   */
  get_options(argc, argv, opts, help_string, NULL, 0);

  /* Set the random seed.
   */
  srandom(seed);

  testser = series_read_ascii("hp41.dat");
  testser->x_width = 2;
  testser->y_width = testser->offset = testser->x_delta = testser->y_delta = 1;
  testser->step = testser->x_width + testser->y_width;
  testds = dataset_create(&dsm_series_method, testser);

  trainser = series_read_ascii("hp21.dat");
  trainser->x_width = 2;
  trainser->y_width = trainser->offset = trainser->x_delta = trainser->y_delta = 1;
  trainser->step = trainser->x_width + trainser->y_width;
  trainds = dataset_create(&dsm_series_method, trainser);

  nn_rbf_basis_normalized = norm;
  nn = nn_create_rbf(nbasis, var, trainds);

  nn->links[0]->need_grads = 1;
  nn->info.train_set = trainds;
  nn->info.opt.min_epochs = 20;
  nn->info.opt.max_epochs = 200;
  nn->info.opt.error_tol = 1e-3;
  nn->info.opt.delta_error_tol = 1e-8;
  nn->info.opt.hook = training_hook;
  nn->info.opt.stepf = opt_lnsrch_cubic;
  nn->info.opt.engine = opt_quasinewton_bfgs;
  nn_train(nn);

  /* Now, let's see how well the RBF performs.
   */
  nn_offline_test(nn, testds, testing_hook);

  /* Free up everything.
   */
  nn_destroy(nn);
  series_destroy(dataset_destroy(testds));
  series_destroy(dataset_destroy(trainds));
  nn_shutdown();

  /* Bye.
   */
  exit(0); 
}
Example #8
int main(int argc, char **argv)
{
  /* These variables are for command-line options.
   */
  double mag = 0.1, etol = 10e-3, detol = 10e-8;
  int seed = 0, minepochs = 10, maxepochs = 100;
  char *afunc = "tanh";

  /* The OPTION array is used to easily parse command-line options.
   */
  OPTION opts[] = {
    { "-seed",      OPT_INT,    &seed,      "random number seed"           },
    { "-minepochs", OPT_INT,    &minepochs, "minimum # of training steps"  },
    { "-maxepochs", OPT_INT,    &maxepochs, "maximum # of training steps"  },
    { "-afunc",     OPT_STRING, &afunc,     "act. function for hidden node"},
    { "-mag",       OPT_DOUBLE, &mag,       "max size of initial weights"  },
    { "-etol",      OPT_DOUBLE, &etol,      "error tolerance"              },
    { "-detol",     OPT_DOUBLE, &detol,     "delta error tolerance"        },
    { NULL,         OPT_NULL,   NULL,       NULL                           }
  };

  /* The DATASET and the NN that we will use.
   */
  DATASET *data;
  NN *nn;

  /* Set it so that xalloc_report() will print to the screen.
   */
  ulog_threshold = ULOG_DEBUG;
  
  /* Get the command-line options.
   */
  get_options(argc, argv, opts, "Train a NN on XOR data.\n", NULL, 0);

  /* Set the random seed.
   */
  srandom(seed);

  /* Create the neural network.  This one has two inputs, one hidden node,
   * and a single output.  The input are connected to the hidden node 
   * and the outputs, while the hidden node is just connected to the
   * outputs.
   */
  nn = nn_create("2 1 1");   /* 2-1-1 architecture. */
  nn_link(nn, "0 -l-> 1");   /* Inputs to hidden link. */
  nn_link(nn, "1 -l-> 2");   /* Hidden to output link. */
  nn_link(nn, "0 -l-> 2");   /* Input to output short-circuit link. */  

  /* Set the Activation functions of the hidden and output layers and
   * initialize the weights to uniform random values between -/+mag.
   */
  nn_set_actfunc(nn, 1, 0, afunc);
  nn_set_actfunc(nn, 2, 0, "logistic");
  nn_init(nn, mag);
 
  /* Convert the C matrix into a DATASET.  There are two inputs, one
   * output, and four patterns total.
   */
  data = dataset_create(&dsm_matrix_method,
			dsm_c_matrix(&xor_data[0][0], 2, 1, 4));

  /* Tell the NN how to train itself.
   */
  nn->info.train_set = data;
  nn->info.opt.min_epochs = minepochs;
  nn->info.opt.max_epochs = maxepochs;
  nn->info.opt.error_tol = etol;
  nn->info.opt.delta_error_tol = detol;

  nn_train(nn);
  nn_offline_test(nn, data, NULL);

  nn_write(nn, "xor.net");
  nn_destroy(nn);
  nn = nn_read("xor.net");
  nn_destroy(nn);
  unlink("xor.net");

  dsm_destroy_matrix(dataset_destroy(data));
  nn_shutdown();

  xalloc_report();

  /* Bye.
   */
  exit(0); 
}
Example #9
int main(int argc, char **argv)
{
  /* These variables are for command-line options.
   */
  double mag = 1.0, etol = 10e-3, detol = 10e-8;
  double rate = 0.1, moment = 0.9, subsamp = 0, decay = 0.9;
  int seed = 0, minepochs = 10, maxepochs = 100;
  char *afunc = "tanh";
  void *linealg = opt_lnsrch_golden, *optalg = opt_conjgrad_pr;

  OPTION_SET_MEMBER optsetm[] = {
    { "cgpr",   opt_conjgrad_pr },
    { "cgfr",   opt_conjgrad_fr },
    { "qndfp",  opt_quasinewton_dfp },
    { "qnbfgs", opt_quasinewton_bfgs },
    { "lm",     opt_levenberg_marquardt },
    { "gd",     opt_gradient_descent },
    { NULL,     NULL }
  };

  OPTION_SET_MEMBER linesetm[] = {
    { "golden", opt_lnsrch_golden },
    { "hybrid", opt_lnsrch_hybrid },
    { "cubic",  opt_lnsrch_cubic },
    { "stc",    nn_lnsrch_search_then_converge },
    { "none",   NULL },
    { NULL,     NULL }
  };

  OPTION_SET lineset = { &linealg, linesetm };
  OPTION_SET optset = { &optalg, optsetm };
    
  /* The OPTION array is used to easily parse command-line options.
   */
  OPTION opts[] = {
    { "-seed",      OPT_INT,    &seed,      "random number seed"           },
    { "-minepochs", OPT_INT,    &minepochs, "minimum # of training steps"  },
    { "-maxepochs", OPT_INT,    &maxepochs, "maximum # of training steps"  },
    { "-afunc",     OPT_STRING, &afunc,     "act. function for hidden node"},
    { "-mag",       OPT_DOUBLE, &mag,       "max size of initial weights"  },
    { "-etol",      OPT_DOUBLE, &etol,      "error tolerance"              },
    { "-detol",     OPT_DOUBLE, &detol,     "delta error tolerance"        },
    { "-rate",      OPT_DOUBLE, &rate,      "learning rate"                },
    { "-moment",    OPT_DOUBLE, &moment,    "momentum rate"                },
    { "-alg",       OPT_SET,    &optset,    "training algorithm"           },
    { "-subsamp",   OPT_DOUBLE, &subsamp,   "subsample value"  },
    { "-decay",     OPT_DOUBLE, &decay,     "stochastic decay"  },
    { "-srch",      OPT_SET,    &lineset,   "line search" },
    { NULL,         OPT_NULL,   NULL,       NULL                           }
  };

  /* The DATASET and the NN that we will use.
   */
  DATASET *data;
  NN *nn;

  /* Get the command-line options.
   */
  get_options(argc, argv, opts, help_string, NULL, 0);

  /* Set the random seed.
   */
  srandom(seed);

  /* Create the neural network.  This one has two inputs, one hidden node,
   * and a single output.  The input are connected to the hidden node 
   * and the outputs, while the hidden node is just connected to the
   * outputs.
   */
  nn = nn_create("2 1 1");   /* 2-1-1 architecture. */
  nn_link(nn, "0 -l-> 1");   /* Inputs to hidden link. */
  nn_link(nn, "1 -l-> 2");   /* Hidden to output link. */
  nn_link(nn, "0 -l-> 2");   /* Input to output short-circuit link. */  

  /* Set the Activation functions of the hidden and output layers and
   * initialize the weights to uniform random values between -/+mag.
   */
  nn_set_actfunc(nn, 1, 0, afunc);
  nn_set_actfunc(nn, 2, 0, "logistic");
  nn_init(nn, mag);
 
  /* Convert the C matrix into a DATASET.  There are two inputs, one
   * output, and four patterns total.
   */
  data = dataset_create(&dsm_matrix_method,
			dsm_c_matrix(&xor_data[0][0], 2, 1, 4));

  /* Tell the NN how to train itself.
   */
  nn->info.train_set = data;
  nn->info.opt.min_epochs = minepochs;
  nn->info.opt.max_epochs = maxepochs;
  nn->info.opt.error_tol = etol;
  nn->info.opt.delta_error_tol = detol;
  nn->info.opt.hook = training_hook;
  nn->info.opt.rate = rate;
  nn->info.opt.momentum = moment;
  nn->info.opt.decay = decay;
  nn->info.subsample = subsamp;
  if(subsamp != 0)
    nn->info.opt.stochastic = 1;
  nn->info.opt.stepf = linealg;
  nn->info.opt.engine = optalg;
  nn->info.stc_eta_0 = 1;
  nn->info.stc_tau = 100;


  /* Do the training.  This will print out the epoch number and
   * the error level until training halts via one of the stopping
   * criteria.
   */
  nn_train(nn);
  nn->info.subsample = 0;

  /* Print out each input training pattern and the respective
   * NN output.
   */
  printf("--------------------\n");
  nn_offline_test(nn, data, testing_hook);

  /* Free up everything.
   */
  nn_destroy(nn);
  dsm_destroy_matrix(dataset_destroy(data));
  nn_shutdown();

  /* Bye.
   */
  exit(0); 
}
Example #10
int main(int argc, char **argv)
{
  int seed = 1, xdim = 0, ydim = 1, csz = 0, yindex = 0, ssz = 0;
  int clever = 0, regress = 0, dump = 0, best = 0, wfirst = 0, lazy = 0;
  int offset = 0, xdelta = 0, tube = 0;
  double trate = 2.0, tfinal = 10.0;
  double C = 100, aux = 0.5, tol = 1e-3, eps = 1e-12, regeps = 0.1;
  char *fname = NULL, *kname = "gauss", *dtype = "ascii";
  
  OPTION opts[] = {
    { "-dtype",  OPT_STRING,   &dtype,
      "data type. Value should be one of: "
      "ascii, double, short (short_val / 1000.0 = dbl_val), or map "
      "(memory-mapped file of doubles)" },
    { "-kernel", OPT_STRING,   &kname,
      "SVM kernel. Value should be one of: "
      "gauss, poly, tanh, or linear" },
    { "-xdim",   OPT_INT,      &xdim,
      "dimensionality of input points" },
    { "-ydim",   OPT_INT,      &ydim,
      "dimensionality of target points" },
    { "-ssz",    OPT_INT,      &ssz,
      "subset size" },
    { "-fname",  OPT_STRING,   &fname,
      "data file name" },
    { "-seed",   OPT_INT,      &seed,
      "random number seed for shuffled indices" },
    { "-C",      OPT_DOUBLE,   &C,
      "maximum size for Lagrange multipliers" },
    { "-aux",    OPT_DOUBLE,   &aux,
       "auxiliary parameter: variance for Gaussian kernels, "
      "power for polynomials, and threshold for sigmoids" },
    { "-tol",    OPT_DOUBLE,   &tol,
      "tolerance for classification errors" },
    { "-eps",    OPT_DOUBLE,   &eps,
      "floating point epsilon" },
    { "-csz",    OPT_INT,      &csz,
      "kernel output cache size" },
    { "-yindex", OPT_INT,      &yindex,
      "which y[] to classify" },
    { "-clever", OPT_SWITCH,   &clever,
      "use 'ultra clever' incremental outputs" },
    { "-best",   OPT_SWITCH,   &best,
      "use best step if relatively easy to compute" },
    { "-wfirst", OPT_SWITCH,   &wfirst,
      "Always attempt to optimize worst KKT exemplar first" },
    { "-lazy",   OPT_SWITCH,   &lazy,
      "only do a hard search over all multipliers when necessary" },
    { "-tube",   OPT_SWITCH,   &tube,
      "use tube shrinking heuristic?" },
    { "-trate",  OPT_DOUBLE,   &trate,
      "tube shrinking factor" },
    { "-tfinal", OPT_DOUBLE,   &tfinal,
      "final tube shrinkage" },
    { "-regress",OPT_SWITCH,   &regress,
      "assume this is a regression problem (not classification)" },
    { "-regeps", OPT_DOUBLE,   &regeps,
      "epsilon for regression problems" },
    { "-xdelta", OPT_INT,      &xdelta,
      "space between x's (only for time delayed time series)" },
    { "-offset", OPT_INT,      &offset,
      "space between x's and y's (only for time delayed time series)" },
    { "-dump",   OPT_SWITCH,   &dump,
      "dump SVM output to file?" },
    { NULL,      OPT_NULL,     NULL,    NULL }
  };
  
  SERIES *ser;
  DSM_FILE *dsmfile;
  DATASET *data;
  SVM *svm;
  SMORCH smorch = SMORCH_DEFAULT;
  FILE *fp;
  unsigned i, sz, j;
  double *x, *y;
  
  get_options(argc, argv, opts, NULL, NULL, 0);
  
  if(fname == NULL || xdim <= 0) {
    display_options(argv[0], opts, NULL);
    exit(1);
  }
  
  srandom(seed);

  if (!strcmp(dtype, "ascii")) {
    ser = series_read_ascii(fname);
    ser->x_width = xdim;
    ser->y_width = ydim;
    if (xdelta > 0 && offset > 0) {
      ser->x_delta = xdelta;
      ser->offset = offset;
      ser->step = 1;
    }
    else {
      ser->x_delta = ser->y_delta = ser->offset = 1;
      ser->step = ser->x_width + ser->y_width;
    }
    data = dataset_create(&dsm_series_method, ser);
  }
  else if (!strcmp(dtype, "map")) {
    dsmfile = dsm_file(fname);
    dsmfile->x_width = xdim;
    dsmfile->y_width = ydim;
    dsmfile->x_read_width = xdim * sizeof(double);
    dsmfile->y_read_width = ydim * sizeof(double);
    dsmfile->offset = dsmfile->skip = 0;
    dsmfile->step = dsmfile->x_read_width + dsmfile->y_read_width;
    dsmfile->type = SL_DOUBLE;
    dsm_file_initiate(dsmfile);
    data = dataset_create(&dsm_file_method, dsmfile);
  }
  else if (!strcmp(dtype, "double")) {
    data = create_double_dataset(fname, xdim, ydim);
  }
  else if (!strcmp(dtype, "short")) {
    data = create_short_dataset(fname, xdim, ydim);      
  }
  else {
    display_options(argv[0], opts, NULL);
    exit(1);
  }
  
  smorch.data = data;
  
  /* Set up the proper kernel to use. */
  if(!strcmp(kname, "gauss"))
    smorch.kernel = svm_kernel_gauss;
  else if(!strcmp(kname, "poly"))
    smorch.kernel = svm_kernel_poly;
  else if(!strcmp(kname, "tanh"))
    smorch.kernel = svm_kernel_tanh;
  else if(!strcmp(kname, "linear"))
    smorch.kernel = svm_kernel_linear;
  else {
    display_options(argv[0], opts, NULL);
    exit(1);
  }
  
  smorch.cache_size = csz;
  smorch.yindex = yindex;
  smorch.aux = aux;
  smorch.C = C;
  smorch.tol = tol;
  smorch.eps = eps;
  smorch.hook = myhook;
  smorch.finalhook = myfinalhook;
  smorch.subset_size = ssz;
  smorch.ultra_clever = clever;
  smorch.best_step = best;
  smorch.worst_first = wfirst;
  smorch.lazy_loop = lazy;
  smorch.regression = regress;
  smorch.regeps = regeps;
  smorch.tube = tube;
  smorch.tube_rate = trate;
  smorch.tube_final = tfinal;
  
  svm = smorch_train(&smorch);
  svm_write(svm, "tsvm.svm");

  if (dump) {
    if ((fp = fopen("tsvm.tst", "w")) == NULL) {
      fprintf(stderr, "could not open 'tsvm.tst' for writing\n");
      exit(1);
    }
    sz = dataset_size(data);
    for (i = 0; i < sz; i++) {
      x = dataset_x(data, i);
      y = dataset_y(data, i);
      for (j = 0; j < (unsigned)xdim; j++)
	fprintf(fp, "% .4f ", x[j]);
      fprintf(fp, "% .4f ", y[yindex]);
      fprintf(fp, "% .4f\n", svm_output(svm, x));
    }
    fclose(fp);
  }
  
  svm_destroy(svm);
  
  exit(0);
}
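An illustrative invocation, assuming the binary is named tsvm (a guess from the 'tsvm.svm' output file) and that data.dat holds ASCII patterns with two inputs and one target per line; every flag below comes from the OPTION table above:

  ./tsvm -fname data.dat -xdim 2 -ydim 1 -kernel gauss -aux 0.5 -C 10 -dump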