// Progress-meter hook invoked during a transfer. Logs the current
// byte counts, then forwards them to the optional user-supplied
// progress callback. A false return signals the transfer should
// be aborted.
bool Connection::Progress(curl_off_t total_download, curl_off_t current_download, curl_off_t total_upload, curl_off_t current_upload) {
  WriteConnectionLog(this) << "Progress meter. " << current_download << " downloaded, " << total_download << " expected; " << current_upload << " uploaded, " << total_upload << " expected.";

  // No callback registered: always continue the transfer.
  if (!progress_callback_) {
    return true;
  }

  const bool keep_going = progress_callback_(this->shared_from_this(), total_download, current_download, total_upload, current_upload);
  if (!keep_going) {
    WriteConnectionLog(this) << "Aborted by progress meter.";
  }
  return keep_going;
}
// Relays transfer progress to the registered progress callback,
// marshalling onto the callback's message loop when one is set.
//
// NOTE(review): the callback is invoked upload-first
// (ultotal, ulnow, dltotal, dlnow) even though the parameters
// arrive download-first — presumably the callback's signature
// expects that order; confirm against its declaration.
void CurlHttpRequest::NotifyProgress(
    double dltotal, double dlnow, double ultotal, double ulnow) {
  if (!progress_callback_) {
    return;
  }
  if (callback_message_loop_ == nullptr) {
    // No message loop configured: invoke synchronously here.
    progress_callback_(ultotal, ulnow, dltotal, dlnow);
  } else {
    // Defer the invocation to the callback's message loop.
    callback_message_loop_->PostTask(
        nbase::Bind(progress_callback_, ultotal, ulnow, dltotal, dlnow));
  }
}
// Trains a gradient-boosted ensemble of regression trees on mld.
//
// Each instance is temporarily extended with two extra feature slots:
//   instance[mlid_.size()]     — the original target value (saved here so
//                                the target slot can hold the working residual)
//   instance[mlid_.size() + 1] — the running ensemble prediction
// Both are removed (and the target restored) before returning, so mld is
// left unchanged on success. Note: mld is mutated in place during training.
//
// Returns false if the model type is not regression or any tree fails to
// train; true otherwise. Early stopping is available via progress_callback_.
bool boosted_trees::train(const ml_data &mld) {
  trees_.clear();

  // Only regression is supported by this boosting implementation.
  if(type_ != ml_model_type::regression) {
    log_error("boosting only implemented for regression...\n");
    return(false);
  }

  //
  // store the original target value as an unused
  // feature at the end of each instance, and
  // the boosted ensemble prediction after that.
  //
  for(const auto &inst_ptr : mld) {
    ml_instance &instance = *inst_ptr;
    ml_feature_value &fv = instance[index_of_feature_to_predict_];
    instance.push_back(fv);
    // Zero-initialized slot that will accumulate the ensemble prediction.
    ml_feature_value ensemble = {};
    instance.push_back(ensemble);
  }

  // One reusable tree object; re-seeded and re-trained each iteration,
  // then copied into trees_.
  decision_tree boosted_tree{mlid_, index_of_feature_to_predict_, max_tree_depth_, min_leaf_instances_, features_to_consider_per_node_, seed_, true};
  ml_rng rng(seed_);

  //
  // build the ensemble. early stopping via callback
  //
  for(ml_uint ii=0; ii < number_of_trees_; ++ii) {
    // Fit each tree on a random subsample of the data (stochastic boosting).
    ml_data mld_iter;
    sample_without_replacement(mld, mld_iter, subsample_, rng);
    boosted_tree.set_seed(seed_ + ii);
    //
    // start with the optimal constant model
    //
    // (depth 0 on the first iteration yields a single-leaf tree.)
    boosted_tree.set_max_tree_depth((ii == 0) ? 0 : max_tree_depth_);
    log("\nbuilding boosted tree %u\n", ii+1);
    if(!boosted_tree.train(mld_iter)) {
      log_error("failed to build boosted tree...\n");
      // NOTE(review): early return leaves the temp feature slots appended
      // to every instance — callers presumably discard mld on failure;
      // confirm.
      return(false);
    }
    optimize_leaf_nodes(mlid_, loss_func_, boosted_tree);
    trees_.push_back(boosted_tree);
    //
    // update the residual
    //
    // The residual is evaluated against the FULL data set, not the
    // subsample the tree was fit on.
    for(const auto &inst_ptr : mld) {
      ml_instance &instance = *inst_ptr;
      // The target slot currently holds the working residual being fit.
      ml_feature_value &residual = instance[index_of_feature_to_predict_];
      // yi: the saved original target value.
      ml_double yi = instance[mlid_.size()].continuous_value;
      ml_feature_value pred = boosted_tree.evaluate(instance);
      // Accumulate the ensemble prediction. The first (constant) tree is
      // added at full weight; subsequent trees are shrunk by the learning
      // rate.
      instance[mlid_.size() + 1].continuous_value += (ii == 0) ? 
pred.continuous_value : (learning_rate_ * pred.continuous_value);
      ml_double yhat = instance[mlid_.size() + 1].continuous_value;
      //
      // use custom gradient function if given, otherwise squared error gradient
      //
      if(gradient_func_) {
        residual.continuous_value = gradient_func_(yi, yhat);
      }
      else {
        residual.continuous_value = yi - yhat;
      }
    }
    //
    // if a progress callback was given, exercise
    // and make this the final boosting iteration if
    // it returns false
    //
    if(progress_callback_) {
      if(!progress_callback_(ii+1)) {
        break;
      }
    }
  }

  //
  // restore the original target value, and
  // remove the temp features added to the end
  // of each instance
  //
  for(const auto &inst_ptr : mld) {
    ml_instance &instance = *inst_ptr;
    instance[index_of_feature_to_predict_] = instance[mlid_.size()];
    instance.resize(mlid_.size());
  }

  return(true);
}