Example #1
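// Evaluate the scoring function n times without moving any particles,
// notifying the attached OptimizerStates after each pass, then return
// the final score.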
double _ConstOptimizer::do_optimize(unsigned int n) {
  for (unsigned int i = 0; i < n; ++i) {
    get_scoring_function()->evaluate(false);
    update_states();
  }
  return get_scoring_function()->evaluate(false);
}
Example #2
void MolecularDynamics::setup(const ParticleIndexes &ps)
{
  // Get starting score and derivatives, for first dynamics step velocities
  get_scoring_function()->evaluate(true);

  setup_degrees_of_freedom(ps);
}
Example #3
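// Hybrid Monte Carlo accepts or rejects a trial on the total energy:
// kinetic energy from the MD integrator plus the potential energy
// reported by the scoring function.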
double HybridMonteCarlo::do_evaluate(const kernel::ParticleIndexes &) const
{
    if (get_use_incremental_scoring_function())
        IMP_THROW("Incremental scoring not supported", ModelException);
    double ekin = md_->get_kinetic_energy();
    double epot = get_scoring_function()->evaluate(false);
    return ekin + epot;
}
Example #4
double GSLOptimizer::evaluate(const gsl_vector *v) {
  /* set model state */
  write_state(v);
  /* get score */
  double score = get_scoring_function()->evaluate(false);
  best_score_ = std::min(score, best_score_);
  if (score < stop_score_) {
    throw AllDone();
  }
  return score;
}
Example #5
double MonteCarlo::do_optimize(unsigned int max_steps) {
    IMP_OBJECT_LOG;
    IMP_CHECK_OBJECT(this);
    if (get_number_of_movers() == 0) {
        IMP_THROW("Running MonteCarlo without providing any"
                  << " movers isn't very useful.",
                  ValueException);
    }

    ParticleIndexes movable = get_movable_particles();

    // provide a way of feeding in this value
    last_energy_ = do_evaluate(movable);
    if (return_best_) {
        best_ = new Configuration(get_model());
        best_energy_ = last_energy_;
    }
    reset_statistics();
    update_states();

    IMP_LOG_TERSE("MC Initial energy is " << last_energy_ << std::endl);

    for (unsigned int i = 0; i < max_steps; ++i) {
        if (get_stop_on_good_score() &&
                get_scoring_function()->get_had_good_score()) {
            break;
        }
        do_step();
        if (best_energy_ < get_score_threshold()) break;
    }

    IMP_LOG_TERSE("MC Final energy is " << last_energy_ << std::endl);
    if (return_best_) {
        best_->swap_configuration();
        IMP_LOG_TERSE("MC Returning energy " << best_energy_ << std::endl);
        IMP_IF_CHECK(USAGE) {
            IMP_LOG_TERSE("MC Got " << do_evaluate(get_movable_particles())
                          << std::endl);
        }

        return do_evaluate(movable);
    } else {
        return last_energy_;
    }
}
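For context, a minimal sketch of how a caller might drive this optimizer. The model `m`, scoring function `sf`, and mover `mover` are assumed to be configured elsewhere; they are not taken from the examples on this page.

// Hypothetical driver code, assuming m, sf, and mover already exist:
IMP_NEW(core::MonteCarlo, mc, (m));
mc->set_scoring_function(sf);
mc->add_mover(mover);               // at least one mover is required (see above)
double energy = mc->optimize(1000); // Optimizer::optimize() dispatches to do_optimize()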
Example #6
//! Perform a single dynamics step.
double MolecularDynamics::do_step(const ParticleIndexes &ps,
                                  double ts) {
  IMP_OBJECT_LOG;
  // Get coordinates at t+(delta t) and velocities at t+(delta t/2)
  propagate_coordinates(ps, ts);

  // Get derivatives at t+(delta t)
  get_scoring_function()->evaluate(true);

  // Get velocities at t+(delta t)
  propagate_velocities(ps, ts);

  return ts;
}
Example #7
double GSLOptimizer::optimize(unsigned int iter,
                              const gsl_multimin_fminimizer_type *t,
                              double ms, double mxs) {
  fis_ = get_optimized_attributes();
  best_score_ = std::numeric_limits<double>::max();
  unsigned int n = get_dimension();
  if (n == 0) {
    IMP_LOG(TERSE, "Nothing to optimize" << std::endl);
    return get_scoring_function()->evaluate(false);
  }
  gsl_multimin_fminimizer *s = gsl_multimin_fminimizer_alloc(t, n);

  gsl_vector *x = gsl_vector_alloc(get_dimension());
  update_state(x);
  gsl_vector *ss = gsl_vector_alloc(get_dimension());
  gsl_vector_set_all(ss, mxs);

  gsl_multimin_function f = internal::create_f_function_data(this);
  gsl_multimin_fminimizer_set(s, &f, x, ss);
  try {
    int status;
    do {
      --iter;
      status = gsl_multimin_fminimizer_iterate(s);
      if (status) {
        IMP_LOG(TERSE, "Ending optimization because of status " << status
                << std::endl);
        break;
      }
      double sz = gsl_multimin_fminimizer_size(s);
      status = gsl_multimin_test_size(sz, ms);
      update_states();
      if (status == GSL_SUCCESS) {
        IMP_LOG(TERSE, "Ending optimization because of small size " << sz
                << std::endl);
        break;
      }
    } while (status == GSL_CONTINUE && iter > 0);
  } catch (const AllDone &) {
  }
  gsl_vector *ret = gsl_multimin_fminimizer_x(s);
  best_score_ = gsl_multimin_fminimizer_minimum(s);
  write_state(ret);
  gsl_multimin_fminimizer_free(s);
  gsl_vector_free(x);
  gsl_vector_free(ss);
  return best_score_;
}
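GSL's simplex minimizer only sees a plain gsl_multimin_function, so internal::create_f_function_data(this) has to wrap the optimizer in a C-style callback. A minimal sketch of such an adapter, with a hypothetical name (the real wrapper lives in the internal namespace):

// Hypothetical adapter: GSL calls the function pointer with the trial vector
// and the params pointer, which recovers the optimizer and forwards to its
// evaluate() method (Example #4).
double gsl_f_adapter(const gsl_vector *v, void *params) {
  return static_cast<GSLOptimizer *>(params)->evaluate(v);
}
// ...wired up as f.f = &gsl_f_adapter; f.n = dimension; f.params = this;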
Example #8
/** \param[in] float_indices Indices of optimizable variables.
    \param[in] x Current value of optimizable variables.
    \param[out] dscore First derivatives for current state.
    \return The model score.
 */
ConjugateGradients::NT ConjugateGradients::get_score(
    Vector<FloatIndex> float_indices, Vector<NT> &x,
    Vector<NT> &dscore) {
  int i, opt_var_cnt = float_indices.size();
  /* set model state */
  for (i = 0; i < opt_var_cnt; i++) {
    IMP_CHECK_VALUE(x[i]);
#ifdef IMP_CG_SCALE
    double v = get_scaled_value(float_indices[i]);  // scaled
#else
    double v = get_value(float_indices[i]);  // not scaled
#endif
    if (std::abs(x[i] - v) > max_change_) {
      if (x[i] < v) {
        x[i] = v - max_change_;
      } else {
        x[i] = v + max_change_;
      }
    }
#ifdef IMP_CG_SCALE
    set_scaled_value(float_indices[i], x[i]);
#else
    set_value(float_indices[i], x[i]);
#endif
  }

  NT score;
  /* get score */
  try {
    score = get_scoring_function()->evaluate(true);
  }
  catch (const ModelException &) {
    // if we took a bad step, just return a bad score
    return std::numeric_limits<NT>::infinity();
  }
  /* get derivatives */
  for (i = 0; i < opt_var_cnt; i++) {
#ifdef IMP_CG_SCALE
    dscore[i] = get_scaled_derivative(float_indices[i]);  // scaled
#else
    dscore[i] = get_derivative(float_indices[i]);  // not scaled
#endif
    IMP_USAGE_CHECK(is_good_value(dscore[i]), "Bad input to CG");
  }
  return score;
}
Example #9
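// Derivative counterpart of Example #4: evaluate(true) also accumulates
// first derivatives in the model, which are read back per attribute with
// get_scaled_derivative() and written into GSL's gradient vector df.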
double GSLOptimizer::evaluate_derivative(const gsl_vector *v,
                                         gsl_vector *df) {
  /* set model state */
  write_state(v);

  /* get score */
  double score = get_scoring_function()->evaluate(true);
  best_score_ = std::min(score, best_score_);
  if (score < stop_score_) {
    throw AllDone();
  }
  /* get derivatives */
  for (unsigned int i = 0; i < fis_.size(); ++i) {
    double d = get_scaled_derivative(fis_[i]);
    gsl_vector_set(df, i, d);
  }
  return score;
}
Example #10
Float HybridMonteCarlo::get_potential_energy() const
{
    return get_scoring_function()->evaluate(false);
}
Example #11
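// Gradient descent with an adaptive step size: grow the step (up to
// max_step_size_) after an accepted move, shrink it after a rejected or
// flat one, and stop when no useful step can be found.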
double SteepestDescent::do_optimize(unsigned int max_steps) {
  IMP_OBJECT_LOG;
  Float last_score, new_score = 0.0;

  // set up the indexes

  FloatIndexes float_indexes = get_optimized_attributes();

  Float current_step_size = step_size_;

  // ... and space for the old values
  algebra::VectorKD temp_derivs =
      algebra::get_zero_vector_kd(float_indexes.size());
  algebra::VectorKD temp_values =
      algebra::get_zero_vector_kd(float_indexes.size());

  for (unsigned int step = 0; step < max_steps; step++) {
    // model.show(std::cout);
    int cnt = 0;

    // evaluate the last model state
    last_score = get_scoring_function()->evaluate(true);
    IMP_LOG_TERSE("start score: " << last_score << std::endl);

    // store the old values
    for (unsigned int i = 0; i < temp_derivs.get_dimension(); i++) {
      temp_derivs[i] = get_derivative(float_indexes[i]);
      temp_values[i] = get_value(float_indexes[i]);
    }

    bool constant_score = false;
    while (true) {
      cnt++;
      // try new values based on moving down the gradient at the current
      // step size

      IMP_LOG_VERBOSE("step: " << temp_derivs * current_step_size << std::endl);
      for (unsigned int i = 0; i < float_indexes.size(); i++) {
        set_value(float_indexes[i],
                  temp_values[i] - temp_derivs[i] * current_step_size);
      }

      // check the new model
      new_score = get_scoring_function()->evaluate(false);
      IMP_LOG_TERSE("last score: " << last_score << "  new score: " << new_score
                                   << "  step size: " << current_step_size
                                   << std::endl);

      // if the score got better, we'll take it
      if (new_score < last_score) {
        IMP_LOG_TERSE("Accepting step of size " << current_step_size);
        update_states();
        if (new_score <= threshold_) {
          IMP_LOG_TERSE("Below threshold, returning." << std::endl);
          return new_score;
        }
        current_step_size = std::min(current_step_size * 1.4, max_step_size_);
        break;
      }

      // if the score is the same, keep going one more time
      if (std::abs(new_score - last_score) < .0000001) {
        if (constant_score) {
          break;
        }

        current_step_size *= 0.9;
        constant_score = true;
      } else {
        constant_score = false;
        current_step_size *= .7;
      }
      if (cnt > 200) {
        // stuck
        for (unsigned int i = 0; i < float_indexes.size(); i++) {
          set_value(float_indexes[i], temp_values[i]);
        }
        IMP_LOG_TERSE("Unable to find a good step. Returning" << std::endl);
        return last_score;
      }
      if (current_step_size < .00000001) {
        // here is as good as any place we found
        update_states();
        IMP_LOG_TERSE("Unable to make progress, returning." << std::endl);
        return new_score;
      }
    }
  }

  return new_score;
}
Example #12
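// Conjugate gradient minimization with Beale restart vectors and a Powell
// restart test (see the inline comments below); the label-and-goto control
// flow mirrors the structure of the algorithm it was adapted from.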
Float ConjugateGradients::do_optimize(unsigned int max_steps) {
  IMP_OBJECT_LOG;
  IMP_USAGE_CHECK(get_model(),
                  "Must set the model on the optimizer before optimizing");
  clear_range_cache();
  Vector<NT> x, dx;
  int i;
  // ModelData* model_data = get_model()->get_model_data();

  FloatIndexes float_indices = get_optimized_attributes();

  int n = float_indices.size();
  if (n == 0) {
    IMP_THROW("There are no optimizable degrees of freedom.", ModelException);
  }

  x.resize(n);
  dx.resize(n);
  // get initial state in x(n):
  for (i = 0; i < n; i++) {
#ifdef IMP_CG_SCALE
    x[i] = get_scaled_value(float_indices[i]);  // scaled
#else
    x[i] = get_value(float_indices[i]);  // not scaled
#endif
    IMP_USAGE_CHECK(
        !IMP::isnan(x[i]) && std::abs(x[i]) < std::numeric_limits<NT>::max(),
        "Bad input to CG");
  }

  // Initialize optimization variables
  int ifun = 0;
  int nrst;
  NT dg1, xsq, dxsq, alpha, step, u1, u2, u3, u4;
  NT f = 0., dg = 1., w1 = 0., w2 = 0., rtst, bestf;
  bool gradient_direction;

  // dx holds the gradient at x
  // search holds the search vector
  // estimate holds the best current estimate to the minimizer
  // destimate holds the gradient at the best current estimate
  // resy holds the restart Y vector
  // ressearch holds the restart search vector
  Vector<NT> search, estimate, destimate, resy, ressearch;
  search.resize(n);
  estimate.resize(n);
  destimate.resize(n);
  resy.resize(n);
  ressearch.resize(n);

/* Calculate the function and gradient at the initial
   point and initialize nrst,which is used to determine
   whether a Beale restart is being done. nrst=n means that this
   iteration is a restart iteration. */
g20:
  f = get_score(float_indices, x, dx);
  if (get_stop_on_good_score() &&
      get_scoring_function()->get_had_good_score()) {
    estimate = x;
    goto end;
  }
  ifun++;
  nrst = n;
  // this is a gradient, not restart, direction:
  gradient_direction = true;

  /* Calculate the initial search direction, the norm of x squared,
     and the norm of dx squared. dg1 is the current directional
     derivative, while xsq and dxsq are the squared norms. */
  dg1 = xsq = 0.;

  for (i = 0; i < n; i++) {
    search[i] = -dx[i];
    xsq += x[i] * x[i];
    dg1 -= dx[i] * dx[i];
  }
  dxsq = -dg1;

  /* Test if the initial point is the minimizer. */
  if (dxsq <= cg_eps * cg_eps * std::max(NT(1.0), xsq)) {
    goto end;
  }

/* Begin the major iteration loop. */
g40:
  update_states();
  /* Begin linear search. alpha is the steplength. */

  if (gradient_direction) {
    /* This results in scaling the initial search vector to unity. */
    alpha = 1.0 / sqrt(dxsq);
  } else if (nrst == 1) {
    /* Set alpha to 1.0 after a restart. */
    alpha = 1.0;
  } else {
    /* Set alpha to the nonrestart conjugate gradient alpha. */
    alpha = alpha * dg / dg1;
  }

  /* Store current best estimate for the score */
  estimate = x;
  destimate = dx;

  /* Try to find a better score by linear search */
  if (!line_search(x, dx, alpha, float_indices, ifun, f, dg, dg1, max_steps,
                   search, estimate)) {
    /* If the line search failed, it was either because the maximum number
       of iterations was exceeded, or the minimum could not be found */
    if (static_cast<unsigned int>(ifun) > max_steps) {
      goto end;
    } else if (gradient_direction) {
      goto end;
    } else {
      goto g20;
    }
  }

  /* THE LINE SEARCH HAS CONVERGED. TEST FOR CONVERGENCE OF THE ALGORITHM. */
  dxsq = xsq = 0.0;
  for (i = 0; i < n; i++) {
    dxsq += dx[i] * dx[i];
    xsq += x[i] * x[i];
  }
  if (dxsq < threshold_) {
    goto end;
  }

  /* Search continues. Set search(i)=alpha*search(i),the full step vector. */
  for (i = 0; i < n; i++) {
    search[i] *= alpha;
  }

  /* COMPUTE THE NEW SEARCH VECTOR;
     TEST IF A POWELL RESTART IS INDICATED. */
  rtst = 0.;
  for (i = 0; i < n; ++i) {
    rtst += dx[i] * destimate[i];
  }

  if (std::abs(rtst / dxsq) > .2) {
    nrst = n;
  }

  /* If a restart is indicated, save the current d and y
     as the Beale restart vectors and save d'y and y'y
     in w1 and w2. */
  if (nrst == n) {
    ressearch = search;
    w1 = w2 = 0.;
    for (i = 0; i < n; i++) {
      resy[i] = dx[i] - destimate[i];
      w1 += resy[i] * resy[i];
      w2 += search[i] * resy[i];
    }
  }

  /* CALCULATE THE RESTART HESSIAN TIMES THE CURRENT GRADIENT. */
  u1 = u2 = 0.0;
  for (i = 0; i < n; i++) {
    u1 -= ressearch[i] * dx[i] / w1;
    u2 += ressearch[i] * dx[i] * 2.0 / w2 - resy[i] * dx[i] / w1;
  }
  u3 = w2 / w1;
  for (i = 0; i < n; i++) {
    estimate[i] = -u3 * dx[i] - u1 * resy[i] - u2 * ressearch[i];
  }

  /* If this is a restart iteration, estimate contains the new search vector. */
  if (nrst != n) {

    /* NOT A RESTART ITERATION. CALCULATE THE RESTART HESSIAN
       TIMES THE CURRENT Y. */
    u1 = u2 = u3 = 0.0;
    for (i = 0; i < n; i++) {
      u1 -= (dx[i] - destimate[i]) * ressearch[i] / w1;
      u2 = u2 - (dx[i] - destimate[i]) * resy[i] / w1 +
           2.0 * ressearch[i] * (dx[i] - destimate[i]) / w2;
      u3 += search[i] * (dx[i] - destimate[i]);
    }
    step = u4 = 0.;
    for (i = 0; i < n; i++) {
      step =
          (w2 / w1) * (dx[i] - destimate[i]) + u1 * resy[i] + u2 * ressearch[i];
      u4 += step * (dx[i] - destimate[i]);
      destimate[i] = step;
    }

    /* CALCULATE THE DOUBLY UPDATED HESSIAN TIMES THE CURRENT
       GRADIENT TO OBTAIN THE SEARCH VECTOR. */
    u1 = u2 = 0.0;
    for (i = 0; i < n; i++) {
      u1 -= search[i] * dx[i] / u3;
      u2 +=
          (1.0 + u4 / u3) * search[i] * dx[i] / u3 - destimate[i] * dx[i] / u3;
    }
    for (i = 0; i < n; i++) {
      estimate[i] = estimate[i] - u1 * destimate[i] - u2 * search[i];
    }
  }

  /* CALCULATE THE DERIVATIVE ALONG THE NEW SEARCH VECTOR. */
  search = estimate;
  dg1 = 0.0;
  for (i = 0; i < n; i++) {
    dg1 += search[i] * dx[i];
  }

  /* IF THE NEW DIRECTION IS NOT A DESCENT DIRECTION,STOP. */
  if (dg1 <= 0.0) {

    /* UPDATE NRST TO ASSURE AT LEAST ONE RESTART EVERY N ITERATIONS. */
    if (nrst == n) {
      nrst = 0;
    }
    nrst++;
    gradient_direction = false;
    goto g40;
  }

/* ROUNDOFF HAS PRODUCED A BAD DIRECTION. */

end:
  // If the 'best current estimate' is better than the current state, return
  // that:
  bestf = get_score(float_indices, estimate, destimate);
  if (bestf < f) {
    f = bestf;
  } else {
    // Otherwise, restore the current state x (note that we already have the
    // state x and its derivatives dx, so it's rather inefficient to
    // recalculate the score here, but it's cleaner)
    f = get_score(float_indices, x, dx);
  }
  update_states();
  return f;
}