/**
 * Fills every entry of \a Result with an independent draw from the
 * uniform distribution on [0,1).
 *
 * @param Result   matrix to overwrite (size1() x size2())
 * @param mtRandom random engine that supplies the variates
 */
void uniformSampling(M& Result, randEngine& mtRandom)
{
  randFloat drawUniform( mtRandom, realUniformDist(0,1) );
  const size_t nRows = Result.size1();
  const size_t nCols = Result.size2();

  // TODO: Improve with iterators
  // std::generate(Result.begin2(),Result.end2(),drawUniform);

  // Row-major traversal: one fresh variate per entry, so the engine is
  // advanced exactly nRows*nCols times in a fixed order.
  for (size_t row = 0; row < nRows; ++row)
    {
      for (size_t col = 0; col < nCols; ++col)
	{
	  Result(row,col) = drawUniform();
	}
    }
}
/**
 * Latin hypercube sampling: each column of \a Result receives one
 * stratified sample of size size1(). For every column a random
 * permutation selects a distinct stratum per row, and a uniform jitter
 * places the point inside that stratum, so values fall in
 * ((perm[j]-1)/n, perm[j]/n).
 *
 * @param Result   matrix to overwrite; rows = samples, cols = dimensions
 * @param mtRandom random engine used for permutations and jitter
 */
void lhs(M& Result, randEngine& mtRandom)
{
  randFloat jitter( mtRandom, realUniformDist(0,1) );
  const size_t nSamples = Result.size1();
  const size_t nDims    = Result.size2();
  const double scale    = static_cast<double>(nSamples);

  for (size_t dim = 0; dim < nDims; ++dim)
    {
      // TODO: perms starts at 1. Check this
      // Fresh permutation of strata indices for this dimension.
      std::vector<int> strata = return_index_vector(nSamples);
      randomPerms(strata, mtRandom);

      for (size_t row = 0; row < nSamples; ++row)
	{
	  const double stratum = static_cast<double>(strata[row]);
	  Result(row,dim) = ( stratum - jitter() ) / scale;
	}
    }
}
/**
 * Runs one round of the Hedge (multiplicative-weights) bandit update
 * over the criteria tracked by loss_/gain_ and samples the index of the
 * criterion to use next.
 *
 * @return index of the selected criterion; 0 as a fallback if the
 *         sampled value u exceeds every cumulative probability
 *         (logged as an error).
 */
int GP_Hedge::update_hedge()
{
  // We just care about the differences
  double max_l = *std::max_element(loss_.begin(),loss_.end());
  loss_ += svectord(loss_.size(),max_l);

  // To avoid overflow
  double mean_g = std::accumulate(gain_.begin(),gain_.end(),0.0)
    / static_cast<double>(gain_.size());
  gain_ -= svectord(gain_.size(),mean_g);

  // Optimal eta according to Shapire
  // NOTE(review): if all gains are equal, max_g is 0 after centering and
  // the sqrt argument is inf; (std::min) then caps eta at 10 — verify
  // this is the intended degenerate-case behavior.
  double max_g = *std::max_element(gain_.begin(),gain_.end());
  double eta = (std::min)(10.0,sqrt(2.0*log(3.0)/max_g));

  // Compute probabilities: softmax of each centered gain with
  // temperature eta.
  std::transform(gain_.begin(), gain_.end(), prob_.begin(),
		 boost::bind(softmax,_1,eta));

  //Normalize
  double sum_p =std::accumulate(prob_.begin(),prob_.end(),0.0);
  prob_ /= sum_p;

  //Update bandits gain
  // Must happen AFTER the probabilities are computed from this round's
  // gains; the updated gain_ is consumed on the next call.
  gain_ -= loss_;

  // Inverse-CDF sampling: draw u ~ U(0,1) and return the first index
  // whose cumulative probability exceeds it.
  std::partial_sum(prob_.begin(), prob_.end(), cumprob_.begin(),
		   std::plus<double>());

  randFloat sampleUniform( *mtRandom, realUniformDist(0,1));
  double u = sampleUniform();
  for (size_t i=0; i < cumprob_.size(); ++i)
    {
      if (u < cumprob_(i))
	return i;
    }
  // Reachable only through floating-point round-off in cumprob_ (its
  // last entry should be ~1.0).
  FILE_LOG(logERROR) << "Error updating Hedge algorithm. "
		     << "Selecting first criteria by default.";
  return 0;
};
/**
 * Constructs the Hedge criterion selector.
 *
 * NOTE(review): the Mersenne twister is seeded with the fixed constant
 * 100u, so every run produces the identical sample sequence — presumably
 * intentional for reproducibility; confirm this is not meant to be a
 * user-supplied or time-based seed.
 *
 * NOTE(review): sampleUniform binds mtRandom, so this init list relies
 * on mtRandom being declared before sampleUniform in the class —
 * verify the member declaration order in the header.
 */
GP_Hedge::GP_Hedge():
  mtRandom(100u),
  sampleUniform( mtRandom, realUniformDist(0,1))
{};