Example 1
  void KernelModel::setKernel (const vectord &thetav,
                               const vectord &stheta,
                               std::string k_name,
                               size_t dim)
  {
    KernelFactory mKFactory;

    mKernel.reset(mKFactory.create(k_name, dim));

    if ((thetav.size() == 1) && (stheta.size() == 1)
        && (mKernel->nHyperParameters() != 1))
      {
        // We assume an isotropic prior, so we replicate the single
        // value of each vector across all dimensions.
        size_t n = mKernel->nHyperParameters();

        FILE_LOG(logINFO) << "Expected " << n << " hyperparameters."
                          << " Replicating parameters and prior.";

        vectord newthetav = svectord(n, thetav(0));
        vectord newstheta = svectord(n, stheta(0));

        setKernelPrior(newthetav, newstheta);
        mKernel->setHyperParameters(newthetav);
      }
    else
      {
        setKernelPrior(thetav, stheta);
        mKernel->setHyperParameters(thetav);
      }
  }
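
The isotropic branch relies on a uBLAS scalar_vector (svectord) to replicate a single hyperparameter value across all dimensions. A minimal standalone sketch of that replication step, assuming vectord and svectord are the usual Boost uBLAS typedefs:

  #include <boost/numeric/ublas/vector.hpp>
  #include <iostream>

  typedef boost::numeric::ublas::vector<double> vectord;
  typedef boost::numeric::ublas::scalar_vector<double> svectord;

  int main()
  {
    // Replicate a single value across n dimensions, as the
    // isotropic-prior branch does for thetav and stheta.
    size_t n = 4;
    vectord thetav(1); thetav(0) = 0.5;

    vectord newthetav = svectord(n, thetav(0));
    for (size_t i = 0; i < newthetav.size(); ++i)
      std::cout << newthetav(i) << " ";   // prints: 0.5 0.5 0.5 0.5
    std::cout << std::endl;
    return 0;
  }

Assigning the scalar_vector to a plain vector<double> materializes the repeated value, which is why the replicated copies can then be handed to setKernelPrior and setHyperParameters.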
Example 2
 NLOPT_Optimization::NLOPT_Optimization(RGBOptimizable* rgbo, size_t dim):
   mDown(dim), mUp(dim)
 {
   rbobj = NULL;                      // non-gradient wrapper unused here
   rgbobj = new RGBOptimizableWrapper(rgbo);
   alg = DIRECT;                      // default inner optimization algorithm
   maxEvals = MAX_INNER_EVALUATIONS;  // default evaluation budget
   setLimits(zvectord(dim), svectord(dim, 1.0));  // default box: [0,1]^dim
 }
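
The setLimits call defaults the search region to the unit hypercube [0,1]^dim, built from a uBLAS zero_vector and scalar_vector. A small sketch of those default bounds, again assuming the Boost uBLAS typedefs:

  #include <boost/numeric/ublas/vector.hpp>
  #include <iostream>

  typedef boost::numeric::ublas::vector<double> vectord;
  typedef boost::numeric::ublas::zero_vector<double> zvectord;
  typedef boost::numeric::ublas::scalar_vector<double> svectord;

  int main()
  {
    // Default box constraints used by the constructor: [0,1]^dim.
    size_t dim = 3;
    vectord down = zvectord(dim);        // lower bounds: 0 0 0
    vectord up   = svectord(dim, 1.0);   // upper bounds: 1 1 1

    for (size_t i = 0; i < dim; ++i)
      std::cout << "[" << down(i) << ", " << up(i) << "] ";
    std::cout << std::endl;
    return 0;
  }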
Example 3
int GP_Hedge::update_hedge()
{
    // We only care about relative differences, so shift the losses
    // by their maximum.
    double max_l = *std::max_element(loss_.begin(), loss_.end());
    loss_ += svectord(loss_.size(), max_l);

    // Center the gains to avoid overflow in the exponentials.
    double mean_g = std::accumulate(gain_.begin(), gain_.end(), 0.0)
                    / static_cast<double>(gain_.size());
    gain_ -= svectord(gain_.size(), mean_g);

    // Optimal eta according to Schapire
    double max_g = *std::max_element(gain_.begin(), gain_.end());
    double eta = (std::min)(10.0, sqrt(2.0*log(3.0)/max_g));

    // Compute probabilities
    std::transform(gain_.begin(), gain_.end(), prob_.begin(),
                   boost::bind(softmax, _1, eta));

    // Normalize
    double sum_p = std::accumulate(prob_.begin(), prob_.end(), 0.0);
    prob_ /= sum_p;

    // Update the bandits' gains
    gain_ -= loss_;

    std::partial_sum(prob_.begin(), prob_.end(), cumprob_.begin(),
                     std::plus<double>());

    randFloat sampleUniform(*mtRandom, realUniformDist(0,1));
    double u = sampleUniform();

    for (size_t i = 0; i < cumprob_.size(); ++i)
    {
        if (u < cumprob_(i))
            return i;
    }
    FILE_LOG(logERROR) << "Error updating Hedge algorithm. "
                       << "Selecting the first criterion by default.";
    return 0;
}
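
The loop at the end performs roulette-wheel sampling: a uniform draw u is compared against the cumulative probabilities, and the first index whose cumulative mass exceeds u is returned. A self-contained sketch of that selection step, using <random> in place of the class's mtRandom member (a substitution for illustration, not the library's API):

  #include <algorithm>
  #include <iostream>
  #include <numeric>
  #include <random>
  #include <vector>

  // Sample an index from a discrete distribution given by prob,
  // mirroring the cumulative-sum selection at the end of update_hedge().
  int sampleIndex(const std::vector<double>& prob, std::mt19937& rng)
  {
    std::vector<double> cumprob(prob.size());
    std::partial_sum(prob.begin(), prob.end(), cumprob.begin());

    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    double u = uniform(rng);

    for (size_t i = 0; i < cumprob.size(); ++i)
      if (u < cumprob[i]) return static_cast<int>(i);
    return 0;  // fallback, as in the original code
  }

  int main()
  {
    std::mt19937 rng(42);
    std::vector<double> prob = {0.2, 0.5, 0.3};
    std::cout << sampleIndex(prob, rng) << std::endl;
    return 0;
  }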