예제 #1
0
int main(int argc, char ** argv) {
  MPI_Init(&argc, &argv);

  QUESO::FullEnvironment env(MPI_COMM_WORLD, "", "", NULL);

  QUESO::VectorSpace<QUESO::GslVector, QUESO::GslMatrix> paramSpace(env,
      "space_", 3, NULL);

  QUESO::GslVector minBound(paramSpace.zeroVector());
  minBound[0] = -10.0;
  minBound[1] = -10.0;
  minBound[2] = -10.0;

  QUESO::GslVector maxBound(paramSpace.zeroVector());
  maxBound[0] = 10.0;
  maxBound[1] = 10.0;
  maxBound[2] = 10.0;

  QUESO::BoxSubset<QUESO::GslVector, QUESO::GslMatrix> domain("", paramSpace,
      minBound, maxBound);

  ObjectiveFunction<QUESO::GslVector, QUESO::GslMatrix> objectiveFunction(
      "", domain);

  QUESO::GslVector initialPoint(paramSpace.zeroVector());
  initialPoint[0] = 9.0;
  initialPoint[1] = -9.0;
  initialPoint[1] = -1.0;

  QUESO::GslOptimizer optimizer(objectiveFunction);

  double tol = 1.0e-10;
  optimizer.setTolerance(tol);
  optimizer.set_solver_type(QUESO::GslOptimizer::STEEPEST_DESCENT);

  QUESO::OptimizerMonitor monitor(env);
  monitor.set_display_output(true,true);

  std::cout << "Solving with Steepest Decent" << std::endl;
  optimizer.minimize(&monitor);

  if (std::abs( optimizer.minimizer()[0] - 1.0) > tol) {
    std::cerr << "GslOptimize failed.  Found minimizer at: " << optimizer.minimizer()[0]
              << std::endl;
    std::cerr << "Actual minimizer is 1.0" << std::endl;
    queso_error();
  }

  std::string nm = "nelder_mead2";
  optimizer.set_solver_type(nm);
  monitor.reset();
  monitor.set_display_output(true,true);

  std::cout << std::endl << "Solving with Nelder Mead" << std::endl;
  optimizer.minimize(&monitor);

  monitor.print(std::cout,false);

  return 0;
}
/* The objective (fitness) function to be minimized.
 * x: candidate parameter vector of length N.
 * Returns the objective value of the forward-model predictions against
 * the observations (declared elsewhere in this file).
 */
double fitfun(double const *x, int N) {
  const double rho = 720;

  /* NOTE(review): ownership of the buffer returned by forwardModel is
   * unclear from here -- if it is heap-allocated it is leaked; confirm. */
  double *predictions = forwardModel(N, x, rho, 12.0, 16.0, 20.0);
  return objectiveFunction(32, predictions, observations);
}
예제 #3
0
void
GslOptimizer::setFstepSize(double fstepSize)
{
  this->m_optionsObj->m_fstepSize = fstepSize;

  GslVector fstepSizeVector(
      objectiveFunction().domainSet().vectorSpace().zeroVector());
  fstepSizeVector.cwSet(fstepSize);

  this->set_step_size(fstepSizeVector);
}
// Estimate the goal-post shift with CMA-ES: preprocess the image, then
// minimize objectiveFunction over a 2-D (dx, dy) displacement whose scaled
// magnitude is constrained to stay within the search window.
Point2f GoalPostDetector::findTheShift ()
{
	CannyThreshold();
	createPaddedImg();
	getPointsAlongTheGoalBar();

	CMAES<float> cmaes;

	// Two-dimensional search starting at the origin with a small spread.
	const int dim = 2;
	float start[dim];
	float sigma[dim];
	for (int d = 0; d < dim; ++d) {
		start[d] = 0.0;
		sigma[d] = 0.05;
	}

	Parameters<float> params;
	params.init(dim, start, sigma);
	float *funVals = cmaes.init(params);

	float *const *population;
	// Iterate until the CMA-ES stop criterion holds.
	while (!cmaes.testForTermination()) {
		// Draw a fresh population of candidate displacements.
		population = cmaes.samplePopulation();

		// Constraint: |dx| and |dy| (scaled to pixels) must stay inside
		// the search window; resample any candidate that violates it.
		for (int i = 0; i < cmaes.get(CMAES<float>::PopSize); ++i) {
			while (abs(population[i][0]) * scale >= windowSearchSize ||
			       abs(population[i][1]) * scale >= windowSearchSize) {
				cmaes.reSampleSingle(i);
			}
		}

		// Score each candidate and update the search distribution.
		for (int i = 0; i < cmaes.get(CMAES<float>::Lambda); ++i) {
			funVals[i] = objectiveFunction(population[i]);
		}
		cmaes.updateDistribution(funVals);
	}

	// Distribution mean, converted back to pixel units.
	return Point2f(cmaes.getNew(CMAES<float>::XMean)[0] * scale,
	               cmaes.getNew(CMAES<float>::XMean)[1] * scale);
}
예제 #5
0
/* Pure random search: draw kIterLimit uniform candidates from searchSpace
 * and keep the one with the lowest cost under objectiveFunction.
 *
 * @param searchSpace  per-dimension (min, max) bounds for the candidates.
 * @param kIterLimit   number of random candidates to evaluate.
 * @return the best candidate found; if kIterLimit <= 0 the returned
 *         Candidate is default-constructed (cost uninitialized).
 */
RandomSearch::Candidate RandomSearch::search(const std::vector<std::pair<float, float>>& searchSpace, const int kIterLimit)
{
    // BUG FIX: seeding on every call meant two calls within the same second
    // replayed the identical pseudo-random sequence; seed exactly once.
    static bool seeded = false;
    if (!seeded)
    {
        srand((unsigned)time(0));
        seeded = true;
    }

    RandomSearch::Candidate best;
    for (int i = 0; i < kIterLimit; ++i)
    {
        RandomSearch::Candidate candidate;
        candidate.values = randomVector(searchSpace);
        candidate.cost = objectiveFunction(candidate.values);
        // The first iteration always wins; afterwards keep the lower cost.
        if (!i || candidate.cost < best.cost)
        {
            best.cost = candidate.cost;
            best.values.swap(candidate.values);  // steal the vector, no copy
        }
    }
    return best;
}
예제 #6
0
  // Custom training procedure: one SMO (Sequential Minimal Optimization)
  // step that jointly optimizes the Lagrange multipliers of examples i1
  // and i2 for the current class.  Returns true if the pair was updated,
  // false if no useful progress could be made.
  //
  // NOTE(review): alph2, y2 and e2 are used below but not defined in this
  // view -- presumably set as member/outer state for example i2 before
  // takeStep is invoked (e.g. by examineExample); confirm against caller.
  bool svm::takeStep(const int& i1, const int& i2) {
    // Optimizing an example against itself cannot make progress.
    if (i1 == i2) {
      return false;
    }

    //debug("Taking step: " << i1 << " and " << i2 << "\n");

    // old alphas
    double alph1=currentAlpha->at(i1);
    // new alphas
    double a1,a2;
    // Target label of example i1 (expected in {-1, +1}).
    double y1=currentTarget->at(i1);

    // Cached prediction error for example i1.
    double e1=errorCache.at(i1);

    //debug("i1=" << i1 << ", y1=" << y1 << ", alpha1=" << alph1 << ", e1=" << e1 << "\n");

    // Sign product of the two labels: +1 if equal, -1 if different.
    double s=y1*y2;

    // Box-constraint upper bound from the SVM parameters.
    const double C=getParameters().C;


    // Clipping bounds for the new alpha2, derived from the linear
    // equality constraint y1*a1 + y2*a2 = const and 0 <= a <= C.
    double L,H;

    if (lti::abs(y1-y2) > syseps) {
      // Labels differ (float-safe inequality test).
      L=lti::max(0.0,alph2-alph1);
      H=lti::min(C,C+alph2-alph1);
    } else {
      // Labels are equal.
      L=lti::max(0.0,alph1+alph2-C);
      H=lti::min(C,alph1+alph2);
    }

    //debug("L=" << L << ", H=" << H << "\n");

    // Degenerate feasible segment: alpha2 is already pinned.
    if (lti::abs(L-H) < syseps) {
      return false;
    }
    // Kernel evaluations for the 2x2 sub-problem.
    double k11=kernels[currentClass]->apply(trainData->getRow(i1),trainData->getRow(i1));
    double k12=kernels[currentClass]->apply(trainData->getRow(i1),trainData->getRow(i2));
    double k22=kernels[currentClass]->apply(trainData->getRow(i2),trainData->getRow(i2));

    // eta is the (negated) curvature of the objective along the
    // constraint line; eta < 0 is the usual positive-definite case.
    double eta=2*k12-k11-k22;

    if (eta < 0) {
      //debug("eta < 0\n");
      // Unconstrained optimum along the line, then clip to [L, H].
      a2=alph2-y2*(e1-e2)/eta;
      if (a2 < L) {
        a2=L;
      } else if (a2 > H) {
        a2=H;
      }
    } else {
      //debug("eta >= 0\n");
      // Non-negative curvature: evaluate the objective at both segment
      // endpoints (temporarily writing alpha2) and pick the better one.
      currentAlpha->at(i2)=L;
      double low=objectiveFunction();
      currentAlpha->at(i2)=H;
      double high=objectiveFunction();
      currentAlpha->at(i2)=alph2;  // restore before deciding
      if (low > high+epsilon) {
        a2=L;
      } else if ( low < high-epsilon) {
        a2=H;
      } else {
        // Endpoints are equal within tolerance: keep the old value.
        a2=alph2;
      }
    }
    //debug("Checking alpha2[" << i2 << "]: old is " << alph2 << ", new is " << a2 << "\n");
    // Reject steps whose relative change in alpha2 is negligible.
    if (lti::abs(a2-alph2) < epsilon*(a2+alph2+epsilon)) {
      return false;
    }
    // alpha1 follows from the equality constraint.
    a1=alph1+s*(alph2-a2);
    //debug("Checking alpha1[" << i1 << "]: old is " << alph1 << ", new is " << a1 << "\n");
    // update threshold to reflect change in lagrange multipliers
    double w1   = y1*(a1 - alph1);
    double w2   = y2*(a2 - alph2);
    // Two independent threshold estimates (from i1 and i2 respectively);
    // the bias is moved by their average.
    double b1   = e1 + w1*k11 + w2*k12;
    double b2   = e2 + w1*k12 + w2*k22;
    double bold = bias[currentClass];

    bias[currentClass] += 0.5*(b1 + b2);
    // Propagate the bias shift into the cached errors, then commit alphas.
    updateBiasError(bias[currentClass]-bold);
    setAlpha(i1,a1);
    setAlpha(i2,a2);

    return true;
  }