Example #1
  double run_nlopt(nlopt::algorithm algo, eval_func fpointer,
		   vectord& Xnext, int maxf, const std::vector<double>& vd, 
		   const std::vector<double>& vu, void* objPointer)
  {
    double fmin = 0.0;
    size_t n = Xnext.size(); 
    nlopt::opt opt (algo,n);

    std::vector<double> xstd(n);
    opt.set_lower_bounds(vd);
    opt.set_upper_bounds(vu);
    opt.set_min_objective(fpointer, objPointer);
    opt.set_maxeval(maxf);
    
    // It seems BOBYQA can be unstable if the same point is evaluated
    // over and over. NLOPT bug?
    opt.set_ftol_rel(1e-12);	
    opt.set_ftol_abs(1e-12);

    std::copy(Xnext.begin(),Xnext.end(),xstd.begin());
      
    try 
      { 
	opt.optimize(xstd, fmin);  
      }
    catch (nlopt::roundoff_limited& e)
      {
	FILE_LOG(logDEBUG) << "NLOPT Warning: Potential roundoff error. " 
			   << "In general, this can be ignored.";
      }

    std::copy(xstd.begin(),xstd.end(),Xnext.begin());
    return fmin;
  }
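
A note on the snippet above: eval_func is not defined here. Since fpointer is handed directly to nlopt::opt::set_min_objective together with the void* user pointer, it presumably matches NLopt's C-style objective callback, so a plausible definition (an assumption, not shown in the source) is:

// Hedged sketch: callback type compatible with nlopt::opt::set_min_objective.
// NLopt invokes it with the problem dimension, the current point, an optional
// gradient buffer (NULL for derivative-free algorithms such as BOBYQA or
// DIRECT) and the user-data pointer passed as objPointer above.
typedef double (*eval_func)(unsigned int n, const double* x,
                            double* gradient, void* func_data);
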
Example #2
  void KernelModel::setKernel (const vectord &thetav, 
			      const vectord &stheta,
			      std::string k_name, 
			      size_t dim)
  {
    KernelFactory mKFactory;

    mKernel.reset(mKFactory.create(k_name, dim));

    if ((thetav.size() == 1) && (stheta.size() == 1) && (mKernel->nHyperParameters() != 1))
      {
	// We assume isotropic prior, so we replicate the vectors for all dimensions
	size_t n = mKernel->nHyperParameters();

	FILE_LOG(logINFO) << "Expected " << n << " hyperparameters."
			  << " Replicating parameters and prior.";

	vectord newthetav = svectord(n,thetav(0));
	vectord newstheta = svectord(n,stheta(0));

	setKernelPrior(newthetav,newstheta);
	mKernel->setHyperParameters(newthetav);
      }
    else
      {
	setKernelPrior(thetav,stheta);
	mKernel->setHyperParameters(thetav);
      }
  }
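
svectord is not defined in this snippet; given how it is used (a length-n vector filled with one value), it is presumably an alias for boost::numeric::ublas::scalar_vector<double>. A minimal sketch of the replication step under that assumption:

#include <boost/numeric/ublas/vector.hpp>

namespace ublas = boost::numeric::ublas;

// Replicate a single hyperparameter value across n dimensions, as done for
// the isotropic-prior branch above: (value, value, ..., value).
ublas::vector<double> replicate(double value, std::size_t n)
{
  return ublas::scalar_vector<double>(n, value);
}
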
Example #3
  double evaluateSample( const vectord &Xi ) 
  {
    // Copy into a buffer sized to the input; a fixed double x[100] would
    // overflow for inputs with more than 100 dimensions.
    std::vector<double> x(Xi.size());
    for (size_t i = 0; i < Xi.size(); ++i)
      x[i] = Xi(i);
    return testFunction(Xi.size(), &x[0], NULL, NULL);
  };
Example #4
    void setHyperParameters(const vectord &theta) 
    {
      if(theta.size() != n_params)
	{
	  throw std::invalid_argument("Wrong number of kernel hyperparameters");
	}
      params = theta; // TODO: assignment only to give params the right size; make this more efficient.
      std::transform(theta.begin(), theta.end(), params.begin(), (double (*)(double)) exp);
    };
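
The assignment followed by std::transform above first gives params the right size and then overwrites it: the hyperparameters apparently arrive in log space and are exponentiated on storage. A standalone sketch of the same pattern:

#include <algorithm>
#include <cmath>
#include <vector>

// Map log-space hyperparameters back to the natural scale.
std::vector<double> to_natural_scale(const std::vector<double>& log_theta)
{
  std::vector<double> theta(log_theta.size());
  std::transform(log_theta.begin(), log_theta.end(), theta.begin(),
                 [](double v) { return std::exp(v); });
  return theta;
}
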
Example #5
    inline double computeWeightedNorm2(const vectord &x1, const vectord &x2)
    {
      assert(n_inputs == x1.size());
      assert(x1.size() == x2.size());
      assert(x1.size() == params.size());

      vectord xd = x1-x2;
      vectord r = utils::ublas_elementwise_div(xd, params);
      return norm_2(r);
    };
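
utils::ublas_elementwise_div is not shown here; from its use it presumably divides each component of xd by the corresponding length-scale in params, so the function computes norm_2((x1 - x2) / params), the ARD-weighted distance. An equivalent plain-loop sketch under that assumption:

#include <cassert>
#include <cmath>
#include <vector>

// Weighted (ARD) Euclidean distance: each coordinate difference is scaled by
// its own length-scale before taking the 2-norm.
double weighted_norm2(const std::vector<double>& x1,
                      const std::vector<double>& x2,
                      const std::vector<double>& lengthscales)
{
  assert(x1.size() == x2.size() && x1.size() == lengthscales.size());
  double sum = 0.0;
  for (std::size_t i = 0; i < x1.size(); ++i)
    {
      const double d = (x1[i] - x2[i]) / lengthscales[i];
      sum += d * d;
    }
  return std::sqrt(sum);
}
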
Example #6
    int setParameters(const vectord& params)
    { 
      if(params.size() != n_params)
	{
	  FILE_LOG(logERROR) << "Wrong number of mean function parameters"; 
	  return -1; 
	}

      mConstParam = params(0);
      mParameters = boost::numeric::ublas::project(params, 
						   boost::numeric::ublas::range(1, params.size())); 
      return 0;
    };
Example #7
/**************************************************************************************************
 *  Procedure                                                                                     *
 *                                                                                                *
 *  Description: getSigmaPoints                                                                   *
 *  Class      : UnscentedExpectedImprovement                                                     *
 **************************************************************************************************/
void UnscentedExpectedImprovement::getSigmaPoints(const vectord&          x             ,
                                                  const double            scale         ,
                                                  const int               dim           ,
                                                  const matrixd&          matrix_noise  ,
                                                  std::vector<vectord>&   xx            ,
                                                  std::vector<double>&    w             ,
                                                  const bool              matrix_convert)
{
    const size_t n = dim;

    assert(matrix_noise.size1() == n);
    assert(matrix_noise.size2() == n);
    assert(x.size()             == n);

    matrixd px;
    if (matrix_convert) px = UnscentedExpectedImprovement::convertMatrixNoise(matrix_noise, scale, dim);
    else                px = matrix_noise;

    // Output variable initialization
    xx = std::vector<vectord>();
    w  = std::vector<double>();
    xx.push_back(x);
    w .push_back(scale / (dim + scale));

    // Calculate query_i
    for (size_t col = 0; col < n; col += 1)
    {
        xx.push_back(x - boost::numeric::ublas::column(px, col));
        xx.push_back(x + boost::numeric::ublas::column(px, col));
        w .push_back(0.5 / (dim + scale));
        w .push_back(0.5 / (dim + scale));
    }
}
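
getSigmaPoints only builds the 2*dim+1 sigma points and their weights; a typical consumer averages a criterion over them with those weights (which sum to one). A minimal sketch of that step, assuming vectord is the usual uBLAS vector<double> typedef used throughout these snippets:

#include <boost/numeric/ublas/vector.hpp>
#include <cassert>
#include <vector>

typedef boost::numeric::ublas::vector<double> vectord; // assumed typedef

// Weighted average of a criterion f over the sigma points and weights
// produced by getSigmaPoints above.
template <typename Criterion>
double unscentedExpectation(const std::vector<vectord>& xx,
                            const std::vector<double>& w,
                            Criterion f)
{
  assert(xx.size() == w.size());
  double acc = 0.0;
  for (std::size_t i = 0; i < xx.size(); ++i)
    acc += w[i] * f(xx[i]);
  return acc;
}

Example #8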
/**************************************************************************************************
 *  Procedure                                                                                     *
 *                                                                                                *
 *  Description: chooseActiveVariables                                                            *
 *  Class      : iCubOptimizable                                                                  *
 **************************************************************************************************/
void iCubOptimizable::chooseActiveVariables(vectord& query)
{
    if (dim != query.size())
    {
        cout << endl << "[ERROR] Query size does not match the number of active components in the mask. From: iCubOptimizable::chooseActiveVariables.";

        exit(-1);
    }

    uint    variables_updated = 0;
    vectord result            = vectord(_original_dim, 0.0);

    for (uint index = 0; index < _original_dim; index += 1)
    {
        if (_active_variables_mask[index] == true)
        {
            result[index]      = query[variables_updated];
            variables_updated += 1;
        }
        else
        {
            result[index]      = _icubparams.default_query[index];
        }
    }

    // Return new query
    query = result;
}
Example #9
inline vectord t_osz(const vectord& x)
{
    vectord r = x;
    for (size_t i = 0; i < x.size(); ++i)
        r(i) = sign(x(i)) * exp(hat(x(i)) + 0.049 * sin(c1(x(i)) * hat(x(i))) + sin(c2(x(i)) * hat(x(i))));
    return r;
}
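
sign, hat, c1 and c2 are not defined in this snippet. t_osz follows the oscillation transformation T_osz used in the BBOB/COCO benchmark suite, so plausible helper definitions under that convention (an assumption, not taken from the source) are:

#include <cmath>

// Assumed helpers for the BBOB-style oscillation transform.
inline double sign(double x) { return (x > 0.0) - (x < 0.0); }
inline double hat(double x)  { return x == 0.0 ? 0.0 : std::log(std::fabs(x)); }
inline double c1(double x)   { return x > 0.0 ? 10.0 : 5.5; }
inline double c2(double x)   { return x > 0.0 ? 7.9 : 3.1; }
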
Example #10
  void Dataset::setSamples(const vectord &y)
  {
    mY = y;
    for (size_t i=0; i<y.size(); ++i)
      {
	updateMinMax(i);
      } 
  };
Example #11
  inline void KernelModel::computeCrossCorrelation(const vecOfvec& XX, 
						   const vectord &query,
						   vectord& knx)
  {
    std::vector<vectord>::const_iterator x_it  = XX.begin();
    vectord::iterator k_it = knx.begin();
    while(x_it != XX.end())
      {	*k_it++ = (*mKernel)(*x_it++, query); }
  }
Example #12
 vectord getFeatures(const vectord& x) 
 {
   using boost::numeric::ublas::range;
   using boost::numeric::ublas::project;
   vectord res(x.size()+1);
   res(0) = 1;
   project(res,range(1,res.size())) = x;
   return res; 
 };  
Example #13
  inline void KernelModel::setKernelPrior (const vectord &theta, 
					   const vectord &s_theta)
  {
    for (size_t i = 0; i<theta.size(); ++i)
      {
	boost::math::normal n(theta(i),s_theta(i));
	priorKernel.push_back(n);
      }
  };
Example #14
double gauss(const vectord& x, const vectord& mu, const matrixd& sigma)
{
  double n = static_cast<double>(x.size());
  const vectord vd = x-mu;
  matrixd invS = sigma;
  bayesopt::utils::inverse_cholesky(sigma,invS);
  matrixd sig = sigma;

  // Normalized multivariate Gaussian density: divide by (2*pi)^(n/2)*|Sigma|^(1/2).
  return exp(-0.5*inner_prod(vd,prod(invS,vd))) / (pow(2*M_PI,n/2)*sqrt(determinant(sig)));
}
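
A quick sanity check of the density: at the mean of a two-dimensional standard normal the value should be 1/(2*pi), roughly 0.159. A sketch, assuming vectord and matrixd are the usual uBLAS vector<double>/matrix<double> typedefs:

#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <iostream>

int main()
{
  namespace ublas = boost::numeric::ublas;
  ublas::vector<double> x  = ublas::zero_vector<double>(2);
  ublas::vector<double> mu = ublas::zero_vector<double>(2);
  ublas::matrix<double> sigma = ublas::identity_matrix<double>(2);
  std::cout << gauss(x, mu, sigma) << std::endl; // expected ~0.1592
  return 0;
}
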
Example #15
    double operator()( const vectord &x)
    {
      ++nCalls;
      size_t nDims = x.size();
    
      double beta = sqrt(2*log(static_cast<double>(nCalls*nCalls))*(nDims+1) 
			 + log(static_cast<double>(nDims))*nDims*mCoef);

      ProbabilityDistribution* d_ = mProc->prediction(x);
      return d_->lowerConfidenceBound(beta); 
    };
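
lowerConfidenceBound itself is not shown; for a Gaussian prediction it is conventionally the mean minus beta times the standard deviation, with the beta schedule above growing with the number of calls and the input dimension. A minimal standalone sketch of that criterion (not the library's actual implementation):

// LCB for a Gaussian prediction with mean mu and standard deviation sigma;
// larger beta favours exploration.
inline double lower_confidence_bound(double mu, double sigma, double beta)
{
  return mu - beta * sigma;
}
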
Example #16
    int setParameters(const vectord &theta) 
    {
      if(theta.size() != n_params)
	{
	  FILE_LOG(logERROR) << "Wrong number of mean function parameters"; 
	  return -1; 
	}
   
      mParameters = theta;
      return 0;
    };
Example #17
  double NLOPT_Optimization::localTrialAround(vectord& Xnext)
  {
    assert(mDown.size() == Xnext.size());
    assert(mUp.size() == Xnext.size());
    const size_t n = Xnext.size();

    for (size_t i = 0; i < n; ++i) 
      {
	if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
	  {
	    FILE_LOG(logDEBUG) << Xnext;
	    throw std::invalid_argument("Local trial without a proper"
					" initial point.");
	  }
      }

    nlopt::algorithm algo = nlopt::LN_BOBYQA;
    eval_func fpointer = &(NLOPT_Optimization::evaluate_nlopt);
    void* objPointer = static_cast<void *>(rbobj);
    const size_t nIter = 20;
    // std::vector<double> vd(n);
    // std::vector<double> vu(n);

    // for (size_t i = 0; i < n; ++i) 
    //   {
    // 	vd[i] = Xnext(i) - 0.01;
    // 	vu[i] = Xnext(i) + 0.01;
    //   }

    vectord start = Xnext;

    double fmin = run_nlopt(algo,fpointer,Xnext,nIter,
			    mDown,mUp,objPointer);

    FILE_LOG(logDEBUG) << "Near trial " << nIter << "|" 
		       << start << "-> " << Xnext << " f() ->" << fmin;
    
    return fmin;

  }
Example #18
  inline void KernelRegressor::setHyperParameters(const vectord &theta)
  { 
    using boost::numeric::ublas::subrange;
    if (mLearnAll)
      {
	size_t nk = mKernel.nHyperParameters();
	size_t nm = mMean.nParameters();

	mKernel.setHyperParameters(subrange(theta,0,nk));

	vectord result(nm);
	std::transform(theta.begin()+nk, theta.begin()+nk+nm, 
		       result.begin(), (double (*)(double)) log);
	mMean.setParameters(result);

	mSigma = std::exp(theta(nk+nm));
      }     
    else
      {
	mKernel.setHyperParameters(theta); 
      }
  };
Example #19
    int setHyperParameters(const vectord &theta) 
    {
      using boost::numeric::ublas::subrange;

      size_t n_lhs = left->nHyperParameters();
      size_t n_rhs = right->nHyperParameters();
      if (theta.size() != n_lhs + n_rhs)
	{
	  FILE_LOG(logERROR) << "Wrong number of kernel hyperparameters"; 
	  return -1; 
	}
      left->setHyperParameters(subrange(theta,0,n_lhs));
      right->setHyperParameters(subrange(theta,n_lhs,n_lhs+n_rhs));
      return 0;
    };
Example #20
  void DiscreteModel::sampleInitialPoints(matrixd& xPoints, vectord& yPoints)
  {

    vecOfvec perms = mInputSet;
    
    // By using random permutations, we guarantee that 
    // the same point is not selected twice
    utils::randomPerms(perms,mEngine);
    
    // vectord xPoint(mInputSet[0].size());
    for(size_t i = 0; i < yPoints.size(); i++)
      {
	const vectord xP = perms[i];
	row(xPoints,i) = xP;
	yPoints(i) = evaluateSample(xP);
      }
  }
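
utils::randomPerms is not shown in this snippet; judging by the comment it shuffles the candidate set in place using the supplied engine, so taking the first yPoints.size() entries samples points without replacement. A sketch of equivalent behaviour with the standard library (the name randomPermutation is hypothetical):

#include <algorithm>
#include <random>
#include <vector>

// Shuffle candidate inputs in place so that the first k elements form a
// sample of k distinct points (sampling without replacement).
template <typename T>
void randomPermutation(std::vector<T>& items, std::mt19937& engine)
{
  std::shuffle(items.begin(), items.end(), engine);
}
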
Example #21
  double evaluateSample( const vectord& xin)
  {
     if (xin.size() != 2)
      {
	std::cout << "WARNING: This only works for 2D inputs." << std::endl
		  << "WARNING: Using only first two components." << std::endl;
      }

    float y = -1;
    double x1 = xin(0);
    double x2 = xin(1);
    
    bayesopt::utils::FileParser fp("results.txt");
    std::string call = "python ../examples/standalone_calls/eval_branin.py " + 
      fp.to_string(x1) + " " + fp.to_string(x2);
    int ret = system(call.c_str());
    
    fp.openInput();
    fp.read("y",y);
    fp.close();
    
    return y;
  }
Example #22
 inline double computeWeightedNorm2(const vectord &x1, const vectord &x2)
 {  
   assert(n_inputs == x1.size());
   assert(x1.size() == x2.size());
   return norm_2(x1-x2)/params(0); 
 };
Example #23
 double operator()(const vectord &x)
 { 
   vectord x2(x.size());
   mProc->getLastSample(x2);
   return mW*norm_2(x-x2);
 };
Example #24
  double NLOPT_Optimization::run(vectord &Xnext)
  {   
    assert(mDown.size() == Xnext.size());
    assert(mUp.size() == Xnext.size());

    eval_func fpointer;
    void *objPointer;

    size_t n = Xnext.size();
    double fmin = 1;
    int maxf1 = maxEvals*n;
    int maxf2 = 0;    // For a second pass
    const double coef_local = 0.1;
    //int ierror;

    // If Xnext is outside the bounding box, maybe it is undefined
    for (size_t i = 0; i < n; ++i) 
      {
	if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
	  {
	    Xnext(i)=(mDown[i]+mUp[i])/2.0;
	  }
      }

    //    nlopt_opt opt;
    nlopt::algorithm algo;
    switch(alg)
      {
      case DIRECT: // Pure global. No gradient
	algo = nlopt::GN_DIRECT_L;
	fpointer = &(NLOPT_Optimization::evaluate_nlopt);
	objPointer = static_cast<void *>(rbobj);
	break;
      case COMBINED: // Combined local-global (90% DIRECT -> 10% local refinement). No gradient
	algo = nlopt::GN_DIRECT_L;
	maxf2 = static_cast<int>(static_cast<double>(maxf1)*coef_local);
	maxf1 -= maxf2;  // That way, the number of evaluations is the same in all methods.
	fpointer = &(NLOPT_Optimization::evaluate_nlopt);
	objPointer = static_cast<void *>(rbobj);
	break;
      case BOBYQA:  // Pure local. No gradient
	algo = nlopt::LN_BOBYQA;
	fpointer = &(NLOPT_Optimization::evaluate_nlopt);
	objPointer = static_cast<void *>(rbobj);
	break;
      case LBFGS:  // Pure local. Gradient based
	algo = nlopt::LD_LBFGS;
	fpointer = &(NLOPT_Optimization::evaluate_nlopt_grad);
	objPointer = static_cast<void *>(rgbobj);
	break;
      default: 
	throw std::invalid_argument("Inner optimization algorithm"
				    " not supported");
      }

    if (objPointer == NULL)
      {
	throw std::invalid_argument("Wrong object model "
				    "(gradient/no gradient)");
      }

    fmin = run_nlopt(algo,fpointer,Xnext,maxf1,
		     mDown,mUp,objPointer);

    FILE_LOG(logDEBUG) << "1st opt " << maxf1 << "-> " << Xnext 
		       << " f() ->" << fmin;
    if (maxf2)
      {
	//If the point is exactly at the limit, we may have trouble.
    	for (size_t i = 0; i < n; ++i) 
	  {
	    if (Xnext(i)-mDown[i] < 0.0001)
	      {
		Xnext(i) += 0.0001;
		FILE_LOG(logDEBUG) << "Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
	      }
	    if (mUp[i] - Xnext(i) < 0.0001)
	      {
		Xnext(i) -= 0.0001;
		FILE_LOG(logDEBUG) << "Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
	      }
	  }

	// BOBYQA may fail at this point. Could it be that EI is not twice differentiable?
	// fmin = run_nlopt(nlopt::LN_BOBYQA,fpointer,Xnext,maxf2,
	// 		 mDown,mUp,objPointer);
	fmin = run_nlopt(nlopt::LN_COBYLA,fpointer,Xnext,maxf2,
			 mDown,mUp,objPointer);
	FILE_LOG(logDEBUG) << "2nd opt " << maxf2 << "-> " << Xnext 
			   << " f() ->" << fmin;
      }

    return fmin;

  } // NLOPT_Optimization::run
Example #25
 double evaluateSample( const vectord &Xi ) 
 {
   int n = static_cast<int>(Xi.size());
   return  mF(n,&Xi[0],NULL,mOtherData);
 };
Example #26
 inline void NLOPT_Optimization::setLimits(const vectord& down, const vectord& up)
 { 
   std::copy(down.begin(),down.end(),mDown.begin());
   std::copy(up.begin(),up.end(),mUp.begin());
 }