Code Example #1
File: mlgsl_multimin.c  Project: oandrieu/ocamlgsl
CAMLprim value ml_gsl_multimin_fdfminimizer_minimum(value ox, value odx, value og, value T)
{
  gsl_multimin_fdfminimizer *t=GSLMULTIMINFDFMINIMIZER_VAL(T);
  if(Is_block(ox)) {
      value x=Unoption(ox);
      _DECLARE_VECTOR(x);
      _CONVERT_VECTOR(x);
      gsl_vector_memcpy(&v_x, gsl_multimin_fdfminimizer_x(t));
  }
  if(Is_block(odx)) {
      value dx=Unoption(odx);
      _DECLARE_VECTOR(dx);
      _CONVERT_VECTOR(dx);
      gsl_vector_memcpy(&v_dx, gsl_multimin_fdfminimizer_dx(t));
  }
  if(Is_block(og)) {
      value g=Unoption(og);
      _DECLARE_VECTOR(g);
      _CONVERT_VECTOR(g);
      gsl_vector_memcpy(&v_g, gsl_multimin_fdfminimizer_gradient(t));
  }
  return copy_double(gsl_multimin_fdfminimizer_minimum(t));
}
Code Example #2
File: multimin.c  Project: Fudge/rb-gsl
static VALUE rb_gsl_fdfminimizer_minimum(VALUE obj)
{
  gsl_multimin_fdfminimizer *gmf = NULL;
  double min;
  Data_Get_Struct(obj, gsl_multimin_fdfminimizer, gmf);
  min = gsl_multimin_fdfminimizer_minimum(gmf);
  return rb_float_new(min);
}
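
The two bindings above simply read the stored minimum back out of an existing minimizer object. For orientation, below is a minimal standalone C sketch (not taken from any of the projects listed here) of the usual GSL driver loop around gsl_multimin_fdfminimizer_minimum; the quadratic objective f(x) = sum_i (x_i - 1)^2, the dimension, the starting point, and all step/tolerance values are illustrative assumptions only.

#include <stdio.h>
#include <gsl/gsl_multimin.h>

/* Illustrative objective: f(x) = sum_i (x_i - 1)^2 */
static double my_f (const gsl_vector *x, void *params)
{
  double sum = 0.0;
  size_t i;
  (void) params;
  for (i = 0; i < x->size; i++)
    {
      double d = gsl_vector_get (x, i) - 1.0;
      sum += d * d;
    }
  return sum;
}

/* Gradient of the objective: df/dx_i = 2 (x_i - 1) */
static void my_df (const gsl_vector *x, void *params, gsl_vector *g)
{
  size_t i;
  (void) params;
  for (i = 0; i < x->size; i++)
    gsl_vector_set (g, i, 2.0 * (gsl_vector_get (x, i) - 1.0));
}

/* Combined function and gradient, as GSL expects */
static void my_fdf (const gsl_vector *x, void *params, double *f, gsl_vector *g)
{
  *f = my_f (x, params);
  my_df (x, params, g);
}

int main (void)
{
  const size_t n = 2;
  gsl_multimin_function_fdf func = { &my_f, &my_df, &my_fdf, n, NULL };
  gsl_vector *x = gsl_vector_alloc (n);
  gsl_multimin_fdfminimizer *s;
  int status;
  size_t iter = 0;

  gsl_vector_set_all (x, 5.0);   /* starting point */

  s = gsl_multimin_fdfminimizer_alloc (gsl_multimin_fdfminimizer_conjugate_fr, n);
  gsl_multimin_fdfminimizer_set (s, &func, x, 0.01, 1e-4);

  do
    {
      iter++;
      status = gsl_multimin_fdfminimizer_iterate (s);
      if (status)
        break;                   /* iteration could not make progress */
      status = gsl_multimin_test_gradient (gsl_multimin_fdfminimizer_gradient (s), 1e-6);
    }
  while (status == GSL_CONTINUE && iter < 100);

  /* Best function value found so far */
  printf ("minimum = %g after %zu iterations\n",
          gsl_multimin_fdfminimizer_minimum (s), iter);

  gsl_multimin_fdfminimizer_free (s);
  gsl_vector_free (x);
  return 0;
}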
Code Example #3
File: rmap.cpp  Project: brunoellll/player-git-svn
// Relax the map
void rmap_relax(rmap_t *self, int num_cycles)
{
  int i, n;
  gsl_vector *x, *y;
  gsl_multimin_function_fdf fdf;
  gsl_multimin_fdfminimizer *s;
  double step_size, tol;
  rmap_scan_t *scan;

  // HACK
  step_size = 0.01;
  tol = 0.001;
  
  // Compute number of free variables
  n = 3 * self->num_key_scans;

  // Set the initial vector
  x = gsl_vector_alloc(n);
  for (i = 0; i < self->num_scans; i++)
  {
    scan = self->scans + i;
    if (scan->index >= 0)
    {
      gsl_vector_set(x, 3 * scan->index + 0, scan->pose.pos.x);
      gsl_vector_set(x, 3 * scan->index + 1, scan->pose.pos.y);
      gsl_vector_set(x, 3 * scan->index + 2, scan->pose.rot);
    }
  }
  
  // Set up the objective function and allocate the minimizer
  fdf.f = (double (*) (const gsl_vector*, void*)) rmap_fit_f;
  fdf.df = (void (*) (const gsl_vector*, void*, gsl_vector*)) rmap_fit_df;
  fdf.fdf = (void (*) (const gsl_vector*, void*, double*, gsl_vector*)) rmap_fit_fdf;
  fdf.n = n;
  fdf.params = self;  
  s = gsl_multimin_fdfminimizer_alloc(gsl_multimin_fdfminimizer_vector_bfgs, n);

  // Initialize minimizer
  gsl_multimin_fdfminimizer_set(s, &fdf, x, step_size, tol);

  // Optimize
  for (i = 0; i < num_cycles; i++)
  {
    if (gsl_multimin_fdfminimizer_iterate(s) != GSL_SUCCESS)
      break;
  }
  
  self->relax_err = gsl_multimin_fdfminimizer_minimum(s);
  
  // Copy corrections back to data structures
  y = gsl_multimin_fdfminimizer_x(s);
  for (i = 0; i < self->num_scans; i++)
  {
    scan = self->scans + i;
    if (scan->index >= 0)
    {
      scan->delta.pos.x = gsl_vector_get(y, 3 * scan->index + 0) - scan->pose.pos.x;
      scan->delta.pos.y = gsl_vector_get(y, 3 * scan->index + 1) - scan->pose.pos.y;
      scan->delta.rot = gsl_vector_get(y, 3 * scan->index + 2) - scan->pose.rot;
    }
  }

  // Clean up
  gsl_multimin_fdfminimizer_free(s);
  gsl_vector_free(x);

  return;
}
Code Example #4
File: example.c  Project: nquesada/gupta
int
main (int argc, char *argv[])
{
  int i;
  int outp;
  int n = atoi (argv[1]);
  double x, y, z;
  gsl_vector *r = gsl_vector_alloc (3 * n);
  gsl_vector *dr = gsl_vector_alloc (3 * n);
  double uu = 0;
  double est;
  double tmp1, tmp2;
  FILE *in, *out;

  //Reading input
  in = fopen (argv[2], "r");
  if (in != NULL)
    {
      out = fopen (argv[3], "w");
      for (i = 0; i < n; i++)
	{
	  outp = fscanf (in, "%lf %lf %lf", &x, &y, &z);
	  gsl_vector_set (r, 3 * i, x);
	  gsl_vector_set (r, 3 * i + 1, y);
	  gsl_vector_set (r, 3 * i + 2, z);
	}
      fclose (in);

      //Initial energies and forces before optimization
      gupta_fdf (r, &n, &uu, dr);
      tmp1 = tmp2 = 0.0;
      for (i = 0; i < 3 * n; i++)
	{
	  tmp1 = gsl_vector_get (dr, i);
	  tmp2 += tmp1 * tmp1;
	}
      tmp2 = sqrt (tmp2);
      fprintf (stdout,
	       "\n \nInitial Energy in file %s was E=%lf and the value of the norm of the gradient was |df|=%lf\n ",
	       argv[2], uu, tmp2);



      //Setting the optimizer
      size_t iter = 0;
      int status;
      const gsl_multimin_fdfminimizer_type *T;
      gsl_multimin_fdfminimizer *s;
      gsl_multimin_function_fdf my_func;

      my_func.n = 3 * n;
      my_func.f = gupta_f;
      my_func.df = gupta_df;
      my_func.fdf = gupta_fdf;
      my_func.params = &n;

      T = gsl_multimin_fdfminimizer_vector_bfgs2;
      s = gsl_multimin_fdfminimizer_alloc (T, 3 * n);
      gsl_multimin_fdfminimizer_set (s, &my_func, r, 0.01, 1e-4);

      //The actual optimization
      do
	{
	  iter++;
	  status = gsl_multimin_fdfminimizer_iterate (s);
	  if (status)
	    break;
	  status = gsl_multimin_test_gradient (s->gradient, 1e-6);
	}
      while (status == GSL_CONTINUE && iter < 10000);


      est = gsl_multimin_fdfminimizer_minimum (s);
      gsl_vector_memcpy (r, gsl_multimin_fdfminimizer_x (s));

      //The values of the energies and forces after the optimization

      gupta_fdf (r, &n, &uu, dr);
      tmp1 = tmp2 = 0.0;
      for (i = 0; i < 3 * n; i++)
	{
	  tmp1 = gsl_vector_get (dr, i);
	  tmp2 += tmp1 * tmp1;
	}
      tmp2 = sqrt (tmp2);


      fprintf (stdout,
	       "\n \nFinal Energy in file %s was E=%lf and the value of the norm of the gradient was |df|=%lf\n ",
	       argv[3], uu, tmp2);


      for (i = 0; i < n; i++)
	{
	  fprintf (out, "%.16e %.16e %.16e\n", gsl_vector_get (r, 3 * i),
		   gsl_vector_get (r, 3 * i + 1), gsl_vector_get (r,
								  3 * i + 2));
	}
      gsl_multimin_fdfminimizer_free (s);
      gsl_vector_free (r);
      gsl_vector_free (dr);
      fclose (out);
    }
  else
    {
      fprintf (stdout, "The file %s was not found\n", argv[2]);
    }
  return 0;
}
Code Example #5
File: minimizer_low.c  Project: neelravi/octopus
int FC_FUNC_(oct_minimize, OCT_MINIMIZE)
     (const int *method, const int *dim, double *point, const double *step, const double *line_tol, 
      const double *tolgrad, const double *toldr, const int *maxiter, func_d f, 
      const print_f_ptr write_info, double *minimum)
{
  int iter = 0;
  int status;
  double maxgrad, maxdr;
  int i;
  double * oldpoint;
  double * grad;

  const gsl_multimin_fdfminimizer_type *T = NULL;
  gsl_multimin_fdfminimizer *s;
  gsl_vector *x;
  gsl_vector *absgrad, *absdr;
  gsl_multimin_function_fdf my_func;

  param_fdf_t p;

  p.func = f;

  oldpoint = (double *) malloc(*dim * sizeof(double));
  grad     = (double *) malloc(*dim * sizeof(double));

  my_func.f = &my_f;
  my_func.df = &my_df;
  my_func.fdf = &my_fdf;
  my_func.n = *dim;
  my_func.params = (void *) &p;

  /* Starting point */
  x = gsl_vector_alloc (*dim);
  for(i=0; i<*dim; i++) gsl_vector_set (x, i, point[i]);

  /* Allocate space for the gradient */
  absgrad = gsl_vector_alloc (*dim);
  absdr = gsl_vector_alloc (*dim);

  //GSL recommends line_tol = 0.1;
  switch(*method){
  case 1: 
    T = gsl_multimin_fdfminimizer_steepest_descent;
    break;
  case 2: 
    T = gsl_multimin_fdfminimizer_conjugate_fr;
    break;
  case 3: 
    T = gsl_multimin_fdfminimizer_conjugate_pr;
    break;
  case 4: 
    T = gsl_multimin_fdfminimizer_vector_bfgs;
    break;
  case 5: 
    T = gsl_multimin_fdfminimizer_vector_bfgs2;
    break;
  }

  s = gsl_multimin_fdfminimizer_alloc (T, *dim);

  gsl_multimin_fdfminimizer_set (s, &my_func, x, *step, *line_tol);
  do
    {
      iter++;
      for(i=0; i<*dim; i++) oldpoint[i] = point[i];

      /* Iterate */
      status = gsl_multimin_fdfminimizer_iterate (s);

      /* Get current minimum, point and gradient */
      *minimum = gsl_multimin_fdfminimizer_minimum(s);
      for(i=0; i<*dim; i++) point[i] = gsl_vector_get(gsl_multimin_fdfminimizer_x(s), i);
      for(i=0; i<*dim; i++) grad[i] = gsl_vector_get(gsl_multimin_fdfminimizer_gradient(s), i);

      /* Compute convergence criteria */
      for(i=0; i<*dim; i++) gsl_vector_set(absdr, i, fabs(point[i]-oldpoint[i]));
      maxdr = gsl_vector_max(absdr);
      for(i=0; i<*dim; i++) gsl_vector_set(absgrad, i, fabs(grad[i]));
      maxgrad = gsl_vector_max(absgrad);

      /* Print information */
      write_info(&iter, dim, minimum, &maxdr, &maxgrad, point);
      
      /* Store information for next iteration */
      for(i=0; i<*dim; i++) oldpoint[i] = point[i];

      if (status)
        break;

      if ( (maxgrad <= *tolgrad) || (maxdr <= *toldr) ) status = GSL_SUCCESS;
      else status = GSL_CONTINUE;
    }
  while (status == GSL_CONTINUE && iter <= *maxiter);

  if(status == GSL_CONTINUE) status = 1025;

  gsl_multimin_fdfminimizer_free (s);
  gsl_vector_free (x); gsl_vector_free(absgrad); gsl_vector_free(absdr);

  free(oldpoint);
  free(grad);

  return status;
}
Code Example #6
int OptimizationOptions::gslOptimize( NLSFunction *F, gsl_vector* x_vec, 
        gsl_matrix *v, IterationLogger *itLog ) {
  const gsl_multifit_fdfsolver_type *Tlm[] =
    { gsl_multifit_fdfsolver_lmder, gsl_multifit_fdfsolver_lmsder };
  const gsl_multimin_fdfminimizer_type *Tqn[] = 
    { gsl_multimin_fdfminimizer_vector_bfgs,
      gsl_multimin_fdfminimizer_vector_bfgs2, 
      gsl_multimin_fdfminimizer_conjugate_fr,
      gsl_multimin_fdfminimizer_conjugate_pr };
  const gsl_multimin_fminimizer_type *Tnm[] = 
    { gsl_multimin_fminimizer_nmsimplex, gsl_multimin_fminimizer_nmsimplex2, 
      gsl_multimin_fminimizer_nmsimplex2rand };
  int gsl_submethod_max[] = { sizeof(Tlm) / sizeof(Tlm[0]),
                              sizeof(Tqn) / sizeof(Tqn[0]),
                              sizeof(Tnm) / sizeof(Tnm[0]) };
			  
  int status, status_dx, status_grad, k;
  double g_norm, x_norm;

  /* vectorize x row-wise */
  size_t max_ind, min_ind;
  double max_val, min_val, abs_max_val = 0, abs_min_val;
  
  if (this->method < 0 || 
      this->method >= sizeof(gsl_submethod_max)/sizeof(gsl_submethod_max[0]) || 
      this->submethod < 0 || 
      this->submethod >= gsl_submethod_max[this->method]) {
    throw new Exception("Unknown optimization method.\n");   
  }
  
  if (this->maxiter < 0 || this->maxiter > 5000) {
    throw new Exception("opt.maxiter should be in [0;5000].\n");   
  }

  /* LM */
  gsl_multifit_fdfsolver* solverlm;
  gsl_multifit_function_fdf fdflm = { &(F->_f_ls),  &(F->_df_ls), &(F->_fdf_ls), 
                                       F->getNsq(), F->getNvar(), F };
  gsl_vector *g;

  /* QN */
  double stepqn = this->step; 
  gsl_multimin_fdfminimizer* solverqn;
  gsl_multimin_function_fdf fdfqn = { 
    &(F->_f), &(F->_df), &(F->_fdf), F->getNvar(), F };

  /* NM */
  double size;
  gsl_vector *stepnm;
  gsl_multimin_fminimizer* solvernm;
  gsl_multimin_function fnm = { &(F->_f), F->getNvar(), F };

  /* initialize the optimization method */
  switch (this->method) {
  case SLRA_OPT_METHOD_LM: /* LM */
    solverlm = gsl_multifit_fdfsolver_alloc(Tlm[this->submethod], 
                   F->getNsq(), F->getNvar());
    gsl_multifit_fdfsolver_set(solverlm, &fdflm, x_vec);
    g = gsl_vector_alloc(F->getNvar());
    break;
  case SLRA_OPT_METHOD_QN: /* QN */
    solverqn = gsl_multimin_fdfminimizer_alloc(Tqn[this->submethod], 
						F->getNvar() );
    gsl_multimin_fdfminimizer_set(solverqn, &fdfqn, x_vec, 
				  stepqn, this->tol); 
    status_dx = GSL_CONTINUE;  
    break;
  case SLRA_OPT_METHOD_NM: /* NM */
    solvernm = gsl_multimin_fminimizer_alloc(Tnm[this->submethod], F->getNvar());
    stepnm = gsl_vector_alloc(F->getNvar());
    gsl_vector_set_all(stepnm, this->step); 
    gsl_multimin_fminimizer_set( solvernm, &fnm, x_vec, stepnm );
    break;
  }

  /* optimization loop */
  Log::lprintf(Log::LOG_LEVEL_FINAL, "SLRA optimization:\n");
    
  status = GSL_SUCCESS;  
  status_dx = GSL_CONTINUE;
  status_grad = GSL_CONTINUE;  
  this->iter = 0;
  
  switch (this->method) {
  case SLRA_OPT_METHOD_LM:
    gsl_blas_ddot(solverlm->f, solverlm->f, &this->fmin);
    gsl_multifit_gradient(solverlm->J, solverlm->f, g);
    gsl_vector_scale(g, 2);
    {
      gsl_vector *g2 = gsl_vector_alloc(g->size);
      F->computeFuncAndGrad(x_vec, NULL, g2);
      gsl_vector_sub(g2, g);
      if (gsl_vector_max(g2) > 1e-10 || gsl_vector_min(g2) < -1e-10) {
        Log::lprintf(Log::LOG_LEVEL_NOTIFY,
               "Gradient error, max = %14.10f,  min = %14.10f  ...",
               gsl_vector_max(g2), gsl_vector_min(g2));
        print_vec(g2);
      }
      gsl_vector_free(g2);
    }
    if (itLog != NULL) {
      itLog->reportIteration(0, solverlm->x, this->fmin, g);
    }
    break;
  case SLRA_OPT_METHOD_QN:
    this->fmin = gsl_multimin_fdfminimizer_minimum(solverqn);
    if (itLog != NULL) {
      itLog->reportIteration(0, solverqn->x, this->fmin, solverqn->gradient);
    }
    break;
  case SLRA_OPT_METHOD_NM:
    this->fmin = gsl_multimin_fminimizer_minimum( solvernm );
    if (itLog != NULL) {
      itLog->reportIteration(this->iter, solvernm->x, this->fmin, NULL);
    }
    break;
  }

  while (status_dx == GSL_CONTINUE &&
         status_grad == GSL_CONTINUE &&
         status == GSL_SUCCESS &&
         this->iter < this->maxiter) {
    if (this->method == SLRA_OPT_METHOD_LM && this->maxx > 0) {
      if (gsl_vector_max(solverlm->x) > this->maxx ||
          gsl_vector_min(solverlm->x) < -this->maxx) {
        break;
      }
    }

    this->iter++;
    switch (this->method) {
    case SLRA_OPT_METHOD_LM: /* Levenberg-Marquardt */
      status = gsl_multifit_fdfsolver_iterate(solverlm);
      gsl_multifit_gradient(solverlm->J, solverlm->f, g);
      gsl_vector_scale(g, 2);

      /* check the convergence criteria */
      if (this->epsabs != 0 || this->epsrel != 0) {
        status_dx = gsl_multifit_test_delta(solverlm->dx, solverlm->x,
                                            this->epsabs, this->epsrel);
      } else {
        status_dx = GSL_CONTINUE;
      }
      status_grad = gsl_multifit_test_gradient(g, this->epsgrad);
      gsl_blas_ddot(solverlm->f, solverlm->f, &this->fmin);
      if (itLog != NULL) {
        itLog->reportIteration(this->iter, solverlm->x, this->fmin, g);
      }
      break;
    case SLRA_OPT_METHOD_QN:
      status = gsl_multimin_fdfminimizer_iterate( solverqn );

      /* check the convergence criteria */
      status_grad = gsl_multimin_test_gradient(
          gsl_multimin_fdfminimizer_gradient(solverqn), this->epsgrad);
      status_dx = gsl_multifit_test_delta(solverqn->dx, solverqn->x,
                                          this->epsabs, this->epsrel);
      this->fmin = gsl_multimin_fdfminimizer_minimum(solverqn);      
      if (itLog != NULL) {
        itLog->reportIteration(this->iter, solverqn->x, this->fmin, solverqn->gradient);
      }
      break;
    case SLRA_OPT_METHOD_NM:
      status = gsl_multimin_fminimizer_iterate( solvernm );
      /* check the convergence criteria */
      size = gsl_multimin_fminimizer_size( solvernm );
      status_dx = gsl_multimin_test_size( size, this->epsx );
      this->fmin = gsl_multimin_fminimizer_minimum( solvernm );
      if (itLog != NULL) {
        itLog->reportIteration(this->iter, solvernm->x, this->fmin, NULL);
      }
      break;
    }
  } 
  if (this->iter >= this->maxiter) {
    status = EITER;
  }

  switch (this->method) {
  case  SLRA_OPT_METHOD_LM:
    gsl_vector_memcpy(x_vec, solverlm->x);
    if (v != NULL) {
      gsl_multifit_covar(solverlm->J, this->epscov, v); /* ??? Different eps */
    }
    gsl_blas_ddot(solverlm->f, solverlm->f, &this->fmin);
    break;
  case SLRA_OPT_METHOD_QN:
    gsl_vector_memcpy(x_vec, solverqn->x);
    this->fmin = solverqn->f;
    break;
  case SLRA_OPT_METHOD_NM:
    gsl_vector_memcpy(x_vec, solvernm->x);
    this->fmin = solvernm->fval;
    break;
  }
  
  /* print exit information */  
  if (Log::getMaxLevel() >= Log::LOG_LEVEL_FINAL) { /* unless "off" */
    switch (status) {
    case EITER: 
      Log::lprintf("SLRA optimization terminated by reaching " 
                  "the maximum number of iterations.\n" 
                  "The result could be far from optimal.\n");
      break;
    case GSL_ETOLF:
      Log::lprintf("Lack of convergence: "
                  "progress in function value < machine EPS.\n");
      break;
    case GSL_ETOLX:
      Log::lprintf("Lack of convergence: "
                  "change in parameters < machine EPS.\n");
      break;
    case GSL_ETOLG:
      Log::lprintf("Lack of convergence: "
                  "change in gradient < machine EPS.\n");
      break;
    case GSL_ENOPROG:
      Log::lprintf("Possible lack of convergence: no progress.\n");
      break;
    }
    
    if (status_grad != GSL_CONTINUE && status_dx != GSL_CONTINUE) {
      Log::lprintf("Optimization terminated by reaching the convergence "
                  "tolerance for both X and the gradient.\n"); 
    
    } else {
      if (status_grad != GSL_CONTINUE) {
        Log::lprintf("Optimization terminated by reaching the convergence "
	            "tolerance for the gradient.\n");
      } else {
        Log::lprintf("Optimization terminated by reaching the convergence "
                    "tolerance for X.\n");
      }
    }
  }

  /* Cleanup  */
  switch (this->method) {
  case SLRA_OPT_METHOD_LM: /* LM */
    gsl_multifit_fdfsolver_free(solverlm);
    gsl_vector_free(g);
    break;
  case SLRA_OPT_METHOD_QN: /* QN */
    gsl_multimin_fdfminimizer_free(solverqn);
    break;
  case SLRA_OPT_METHOD_NM: /* NM */
    gsl_multimin_fminimizer_free(solvernm);
    gsl_vector_free(stepnm);
    break;
  }

  return GSL_SUCCESS; /* <- correct with status */
}
Code Example #7
File: minim.c  Project: albertocbarrigon/maxdft
void minim(density_t * ndft){
  const gsl_multimin_fdfminimizer_type *T;
  gsl_multimin_fdfminimizer *s;
  gsl_multimin_function_fdf my_func;
  gsl_vector * grad_initial;
  size_t iter;

  ipf_t ipf_previous;

  int  status;
  double minimum, g_initial, norm_initial;
  char * output_string;
  int i;
  params_gsl_multimin_function_t params;

  output_string = (char *) malloc(25*sizeof(char));

  params.nDFT = density_get_val(ndft);
  my_func.n = ipf.npar;
  my_func.f = my_f;
  my_func.df = my_df;
  my_func.fdf = my_fdf;
  my_func.params = (void *) (&params);

  /* We use the BFGS algorithm from the GNU Scientific Library (GSL)
     in its optimized version bfgs2 */
  T = gsl_multimin_fdfminimizer_vector_bfgs2;

  messages_basic("\n\nStarting the optimization.\n\n\n");


  grad_initial = gsl_vector_alloc(ipf.npar);
  my_fdf (ipf.gamma, &params, &g_initial, grad_initial);
  norm_initial = gsl_blas_dnrm2(grad_initial);
  if(g_initial < epsilon_gvalue){
    if(myid == 0) printf("The value of G for the starting gamma is %.12lg,\n", g_initial);
    if(myid == 0) printf("which is already below the requested threshold of %.12lg\n", epsilon_gvalue);
    parallel_end(EXIT_SUCCESS);
  }
  if(norm_initial < epsilon_grad){
    if(myid == 0) printf("The value of the normm of the gradient of G for the starting gamma is %.12lg,\n", norm_initial);
    if(myid == 0) printf("which is already below the requested threshold of %.12lg\n", epsilon_grad);
    parallel_end(EXIT_SUCCESS);
  }
  if(myid == 0) printf("  Starting from gamma =  ");
  if(myid == 0) {for(i = 0; i < ipf.npar; i++) printf ("%.5f ", gsl_vector_get (ipf.gamma, i));}
  if(myid == 0) printf("\n  G(gamma) = %.12lg\n", g_initial);
  if(myid == 0) printf("  ||Grad G(gamma)|| = %.12lg\n\n", norm_initial);


  /* Initialization of the minimizer s for the function
     my_func starting at the x point
     The step size for the first trial step is 0.1
     and the line-minimization tolerance is 0.1 */
  messages_basic("\n\nInitialization of the minimizer.\n\n\n");
  s = gsl_multimin_fdfminimizer_alloc (T, ipf.npar);
  gsl_multimin_fdfminimizer_set (s, &my_func, ipf.gamma, 0.1, 0.1);

  minimum = g_initial;
  iter = 0;
  do{
    iter++;

    ipf_copy(&ipf_previous, &ipf);

    if(myid == 0) printf("  Iter = %d\n", (int)iter);
    if(myid == 0) printf("    starting gamma =  ");
    if(myid == 0) {for(i = 0; i < ipf.npar; i++) printf ("%.5f ", gsl_vector_get (gsl_multimin_fdfminimizer_x(s), i));}
    if(myid == 0) printf("\n    starting G(gamma) = %15.10f\n", minimum);
    /* We make an iteration of the minimizer s */
    status = gsl_multimin_fdfminimizer_iterate (s);
    minimum = gsl_multimin_fdfminimizer_minimum(s);

    if (status){
      if(myid == 0) printf("  Breaking. Reason: %s\n", gsl_strerror(status));
      break;
    }

    if(myid == 0) printf("    ||Grad G(gamma)|| = %15.10f\n", gsl_blas_dnrm2(gsl_multimin_fdfminimizer_gradient(s)));
    if(myid == 0) printf("    G(gamma) = %15.10f\n", minimum);

    /* Checks convergence */
    if(minimum < epsilon_gvalue){
       status = GSL_SUCCESS;
    }else{
       status = gsl_multimin_test_gradient (gsl_multimin_fdfminimizer_gradient(s), epsilon_grad);
    }

    gsl_vector_memcpy(ipf.gamma, gsl_multimin_fdfminimizer_x(s));
    ipf_write(ipf, ipf_ref, "intermediate-pot");


    /* Print the diff between initial and reference IPFs */
    if(myid == 0) printf("    Diff[ IPF(previous), IPF(new) ] = %.12lg\n", ipf_diff(&ipf_previous, &ipf));
    ipf_end(&ipf_previous);

  }
  while (status == GSL_CONTINUE && iter < minim_maxiter);

  if(myid == 0) printf("\n\nFinished optimization. status = %d (%s)\n", status, gsl_strerror(status));
  if(myid == 0) printf("  Final gamma =  ");
  if(myid == 0) {for(i = 0; i < ipf.npar; i++) printf ("%.5f ", gsl_vector_get (gsl_multimin_fdfminimizer_x(s), i));}
  if(myid == 0) printf("\n  With value: G(gamma) = %.12lg\n\n", gsl_multimin_fdfminimizer_minimum(s));

  gsl_vector_memcpy(ipf.gamma, gsl_multimin_fdfminimizer_x(s));
  sprintf(output_string, "pot");
  ipf_write(ipf, ipf_ref, output_string);
  gsl_multimin_fdfminimizer_free (s);
  gsl_vector_free (grad_initial);
  free (output_string);
  fflush(stdout);

}
Code Example #8
File: GslOptimizer.C  Project: pbauman/queso
void GslOptimizer::minimize_with_gradient( unsigned int dim, OptimizerMonitor* monitor )
{
  // Set initial point
  gsl_vector * x = gsl_vector_alloc(dim);
  for (unsigned int i = 0; i < dim; i++) {
    gsl_vector_set(x, i, (*m_initialPoint)[i]);
  }

  // Tell GSL which solver we're using
  const gsl_multimin_fdfminimizer_type* type = NULL;

  // We use m_solver_type here because we need the enum
  switch(m_solver_type)
    {
    case(FLETCHER_REEVES_CG):
      type = gsl_multimin_fdfminimizer_conjugate_fr;
      break;
    case(POLAK_RIBIERE_CG):
      type = gsl_multimin_fdfminimizer_conjugate_pr;
      break;
    case(BFGS):
      type = gsl_multimin_fdfminimizer_vector_bfgs;
      break;
    case(BFGS2):
      type = gsl_multimin_fdfminimizer_vector_bfgs2;
      break;
    case(STEEPEST_DESCENT):
      type = gsl_multimin_fdfminimizer_steepest_descent;
      break;
    case(NELDER_MEAD):
    case(NELDER_MEAD2):
    case(NELDER_MEAD2_RAND):
    default:
      // Wat?!
      queso_error();
    }

  // Init solver
  gsl_multimin_fdfminimizer * solver =
    gsl_multimin_fdfminimizer_alloc(type, dim);

  // Point GSL to the right functions
  gsl_multimin_function_fdf minusLogPosterior;
  minusLogPosterior.n = dim;
  minusLogPosterior.f = &c_evaluate;
  minusLogPosterior.df = &c_evaluate_derivative;
  minusLogPosterior.fdf = &c_evaluate_with_derivative;
  minusLogPosterior.params = (void *)(this);

  gsl_multimin_fdfminimizer_set(solver, &minusLogPosterior, x, getFdfstepSize(), getLineTolerance());

  int status;
  size_t iter = 0;

  do {
    iter++;
    status = gsl_multimin_fdfminimizer_iterate(solver);

    if (status) {
      if( m_objectiveFunction.domainSet().env().fullRank() == 0 )
        {
          std::cerr << "Error while GSL does optimisation. "
                    << "See below for GSL error type." << std::endl;
          std::cerr << "Gsl error: " << gsl_strerror(status) << std::endl;
        }
      break;
    }

    status = gsl_multimin_test_gradient(solver->gradient, this->getTolerance());

    if(monitor)
      {
        gsl_vector* x = gsl_multimin_fdfminimizer_x(solver);
        std::vector<double> x_min(dim);
        for( unsigned int i = 0; i < dim; i++)
          x_min[i] = gsl_vector_get(x,i);

        double f = gsl_multimin_fdfminimizer_minimum(solver);

        gsl_vector* grad = gsl_multimin_fdfminimizer_gradient(solver);
        double grad_norm = gsl_blas_dnrm2(grad);

        monitor->append( x_min, f, grad_norm );
      }

  } while ((status == GSL_CONTINUE) && (iter < this->getMaxIterations()));

  for (unsigned int i = 0; i < dim; i++) {
    (*m_minimizer)[i] = gsl_vector_get(solver->x, i);
  }

  // We're being good human beings and cleaning up the memory we allocated
  gsl_multimin_fdfminimizer_free(solver);
  gsl_vector_free(x);

  return;
}