void llvm_model::set_prediction(const ObsId & obs_id, boost::ptr_vector<theta::Function> & coeffs_, boost::ptr_vector<HistogramFunction> & histos_){ observables.insert(obs_id); const size_t n = coeffs_.size(); if(n!=coeffs_.size()) throw invalid_argument("Model::setPrediction: number of histograms and coefficients do not match"); if(histos[obs_id].size()>0 || coeffs[obs_id].size()>0) throw invalid_argument("Model::setPrediction: prediction already set for this observable"); coeffs[obs_id].transfer(coeffs[obs_id].end(), coeffs_.begin(), coeffs_.end(), coeffs_); histos[obs_id].transfer(histos[obs_id].end(), histos_.begin(), histos_.end(), histos_); for(boost::ptr_vector<theta::Function>::const_iterator it=coeffs[obs_id].begin(); it!=coeffs[obs_id].end(); ++it){ ParIds pids = (*it).get_parameters(); parameters.insert(pids.begin(), pids.end()); } size_t nbins = 0; double xmin = NAN, xmax = NAN; bool first = true; for(boost::ptr_vector<HistogramFunction>::const_iterator it=histos[obs_id].begin(); it!=histos[obs_id].end(); ++it){ if(first){ it->get_histogram_dimensions(nbins, xmin, xmax); first = false; } else{ size_t nbins_tmp = 0; double xmin_tmp = NAN, xmax_tmp = NAN; it->get_histogram_dimensions(nbins_tmp, xmin_tmp, xmax_tmp); if(nbins!=nbins_tmp || xmin!=xmin_tmp || xmax!=xmax_tmp){ throw invalid_argument("llvm_model::set_prediction: histogram dimensions mismatch"); } } const ParIds & pids = (*it).get_parameters(); parameters.insert(pids.begin(), pids.end()); } }
// Compute the Euclidean (L2) distance between v1 and v2.
//
// Returns -1 as an error value when the vectors differ in length, or when both
// are empty (preserved from the original contract: n == 0 returned -1).
double distance::getEuclidean2(boost::ptr_vector<double>& v1, boost::ptr_vector<double>& v2) {
    // Guard clause: mismatched sizes (or empty input) are signalled with -1.
    if (v1.size() != v2.size() || v1.empty()) {
        return (double)-1;
    }
    double euclidean = 0;
    // BUG FIX: the original loop condition used the comma operator
    // ("it1 != v1.end(), it2 != v2.end()"), which evaluates and discards the
    // first test. With equal sizes it happened to work anyway; use && so both
    // iterators are genuinely bounds-checked.
    for (boost::ptr_vector<double>::iterator it1 = v1.begin(), it2 = v2.begin();
         it1 != v1.end() && it2 != v2.end(); ++it1, ++it2) {
        const double diff = *it1 - *it2;
        euclidean += diff * diff;
    }
    return sqrt(euclidean);
}
// Remove the element whose address equals p from vec and hand its ownership to
// the caller. If p is not an element of vec, an empty auto_ptr is returned and
// vec is unchanged.
//
// NOTE(review): the template header (presumably template<class T, class C,
// class A>) is not visible in this chunk — confirm it directly precedes this
// definition.
// NOTE(review): std::auto_ptr is deprecated in C++11 and removed in C++17;
// migrating this interface would require switching callers to std::unique_ptr.
std::auto_ptr<T> release_ptr( T *p, boost::ptr_vector<T,C,A>& vec)
{
    std::auto_ptr<T> result;
    // Linear scan comparing each element's address against p.
    for( typename boost::ptr_vector<T,C,A>::iterator it( vec.begin()), e( vec.end()); it != e; ++it)
    {
        if( &(*it) == p)
        {
            // vec.release(it) detaches the element without deleting it; the
            // returned auto_type temporarily owns it until we move ownership
            // into result.
            typename boost::ptr_vector<T,C,A>::auto_type ptr = vec.release( it);
            result.reset( ptr.release());
            break;
        }
    }
    return result;
}
// Pick the (kernel index, C-value index) configuration with the best
// cross-validation score over all kernels in `kernels` and penalty values in
// `c_vals`. The kernel list is split into contiguous chunks, one per worker
// thread; each worker reports its best local configs and score, and ties
// between equally good configs are broken at random by pick_rand().
//
// NOTE(review): presumably a function template over label_type — the template
// header is not visible in this chunk; confirm in the enclosing file.
std::pair<size_t, size_t> tune_params( const double* divs, size_t num_bags, const std::vector<label_type> &labels, const boost::ptr_vector<Kernel> &kernels, const std::vector<double> &c_vals, const svm_parameter &svm_params, size_t folds, size_t num_threads)
{
    typedef std::pair<size_t, size_t> config;

    size_t num_kernels = kernels.size();
    if (num_kernels == 0) {
        BOOST_THROW_EXCEPTION(std::domain_error( "no kernels in the kernel group"));
    } else if (num_kernels == 1 && c_vals.size() == 1) {
        // only one option, we already know what's best
        return config(0, 0);
    }

    // want to be able to take sub-lists of kernels.
    // this is like c_array(), but constness is slightly different and
    // it works in old, old boosts.
    const Kernel * const * kern_array =
        reinterpret_cast<const Kernel* const*>(&kernels.begin().base()[0]);

    // how many threads are we using?
    num_threads = npdivs::get_num_threads(num_threads);
    if (num_threads > num_kernels)
        num_threads = num_kernels;

    if (num_threads == 1) {
        // don't actually make a new thread if it's just 1-threaded
        double score;
        return pick_rand(tune_params_single(divs, num_bags, labels, kern_array, num_kernels, c_vals, svm_params, folds, &score));
    }

    // grunt work to set up multithreading:
    // per-thread workers, error slots, result lists, and best-score slots.
    boost::ptr_vector< tune_params_worker<label_type> > workers;
    std::vector<boost::exception_ptr> errors(num_threads);
    boost::thread_group worker_threads;
    std::vector< std::vector<config> > results(num_threads);
    std::vector<double> scores(num_threads, 0);

    // Each worker handles a contiguous slice of ceil(num_kernels / num_threads)
    // kernels starting at kern_start.
    size_t kerns_per_thread = (size_t) std::ceil(double(num_kernels) / num_threads);
    size_t kern_start = 0;

    // give each thread a few kernels and get their most-accurate configs
    // TODO: better allocation algo
    for (size_t i = 0; i < num_threads; i++) {
        // Clamp the final slice; n_kerns can reach 0 when the division is uneven.
        int n_kerns = (int)(std::min(kern_start+kerns_per_thread, num_kernels)) - (int)(kern_start);
        if (n_kerns <= 0)
            break;

        // workers (a ptr_vector) takes ownership of the new worker object.
        workers.push_back(new tune_params_worker<label_type>( divs, num_bags, labels, kern_array + kern_start, n_kerns, c_vals, svm_params, folds, &results[i], &scores[i], errors[i] ));

        worker_threads.create_thread(boost::ref(workers[i]));
        kern_start += kerns_per_thread;
    }
    worker_threads.join_all();

    // Re-throw the first exception any worker captured, if any.
    for (size_t i = 0; i < num_threads; i++)
        if (errors[i])
            boost::rethrow_exception(errors[i]);

    // get all the best configs into one vector
    double best_score = *std::max_element( scores.begin(), scores.end());
    std::vector<config> best_configs;

    if (best_score == -std::numeric_limits<double>::infinity()) {
        FILE_LOG(logERROR) << "all kernels were terrible";
        BOOST_THROW_EXCEPTION(std::domain_error("all kernels were terrible"));
    }

    // Translate each winning worker's slice-local kernel indices back to
    // global indices by adding that worker's slice offset (kern_start).
    kern_start = 0;
    for (size_t i = 0; i < num_threads; i++) {
        if (scores[i] == best_score) {
            for (size_t j = 0; j < results[i].size(); j++) {
                config cfg = results[i][j];
                best_configs.push_back( config(cfg.first + kern_start, cfg.second));
            }
        }
        kern_start += kerns_per_thread;
    }

    return pick_rand(best_configs);
}
// Append every alternative in r to this pattern, taking ownership of the
// elements. Each transferred alternative has its back-pointer set to this
// pattern; r is left empty afterwards.
void Pattern::addAlternatives( boost::ptr_vector<Alternative> & r )
{
    // Point each incoming alternative back at its new owner before the
    // transfer, while r still holds the elements.
    for( boost::ptr_vector<Alternative>::iterator it = r.begin(); it != r.end(); ++it )
    {
        it->pattern = this;
    }
    // Move ownership of all elements from r into our own container.
    alternatives.transfer( alternatives.end(), r.begin(), r.end(), r );
}
// Construct a multiplexing table backed by a set of underlying tables.
// Ownership of every element of underlying_tables_ is moved into this object;
// the argument container is left empty on return.
multiplexing_database::multiplexing_table::multiplexing_table(const boost::shared_ptr<Database> & db, boost::ptr_vector<theta::Table> & underlying_tables_) :
    Table(db),
    next_icol(0)
{
    // transfer() moves the element pointers without copying the Table objects.
    boost::ptr_vector<theta::Table> & dest = underlying_tables;
    dest.transfer(dest.end(), underlying_tables_.begin(), underlying_tables_.end(), underlying_tables_);
}
// Wrap one tree_view_node together with the container holding its children.
// NOTE(review): itor_ caches children.begin(); if the children container is
// mutated after construction, that iterator may be invalidated — confirm at
// the call sites that tree_node instances are rebuilt in that case.
tree_node::tree_node(gui2::tree_view_node& node, boost::ptr_vector<gui2::tree_view_node>& children)
    : children_(children)      // presumably a reference member — declaration not visible here
    , widget_(&node)           // non-owning pointer to the wrapped node
    , itor_(children.begin())  // iterator positioned at the first child
{
}
// Append every alternative in r to this pattern, taking ownership of the
// elements; r is left empty afterwards. Every alternative held by this
// pattern (including the newly transferred ones) gets its back-pointer set
// to this pattern.
void Pattern::addAlternatives( boost::ptr_vector<Alternative> & r )
{
    // Move ownership of all elements from r into our own container.
    alternatives.transfer( alternatives.end(), r.begin(), r.end(), r );
    // FIX: index with the container's unsigned size_type instead of a signed
    // int, which caused a signed/unsigned comparison against size().
    for( boost::ptr_vector<Alternative>::size_type i = 0; i < alternatives.size(); ++i )
    {
        alternatives[i].pattern = this;
    }
}
// Process a batch of ALPS status strings reported for the Cray reporter node
// nd_name. Walks the strings, switching the "current" node whenever a
// "node=" record starts, and for each node: saves its status attributes,
// handles GPU-status sub-blocks, processes each reservation id at most once
// per update (tracked in rsv_ht), and applies ncpus/state updates.
//
// Returns PBSE_NONE on success (and also when the reporter node cannot be
// found or disappears mid-update), or a decode_arst() error code on failure.
//
// NOTE(review): the rc returned by process_gpu_status() is assigned but never
// checked — presumably intentional best-effort; confirm.
// NOTE(review): free_arst(&temp) is called on the error paths but not before
// the final successful return — possible leak unless save_node_status()
// consumes/frees temp's contents; confirm against its implementation.
int process_alps_status( char *nd_name, boost::ptr_vector<std::string>& status_info)
{
    char           *current_node_id = NULL;
    char            node_index_buf[MAXLINE];
    int             node_index = 0;
    struct pbsnode *parent;
    struct pbsnode *current = NULL;
    int             rc;
    pbs_attribute   temp;
    hash_table_t   *rsv_ht;
    char            log_buf[LOCAL_LOG_BUF_SIZE];

    memset(&temp, 0, sizeof(temp));
    // Initialize temp as an empty array-of-strings attribute.
    if ((rc = decode_arst(&temp, NULL, NULL, NULL, 0)) != PBSE_NONE)
    {
        log_record(PBSEVENT_DEBUG, PBS_EVENTCLASS_NODE, __func__, "cannot initialize attribute");
        return(rc);
    }

    /* if we can't find the parent node, ignore the update */
    if ((parent = find_nodebyname(nd_name)) == NULL)
        return(PBSE_NONE);

    /* keep track of reservations so that they're only processed once per update */
    rsv_ht = create_hash(INITIAL_RESERVATION_HOLDER_SIZE);

    /* loop over each string */
    for(boost::ptr_vector<std::string>::iterator i = status_info.begin();i != status_info.end();i++)
    {
        const char *str = i->c_str();

        if (!strncmp(str, "node=", strlen("node=")))
        {
            // A new node record starts: flush the accumulated status of the
            // previous node (if any) before switching.
            if (i != status_info.begin())
            {
                snprintf(node_index_buf, sizeof(node_index_buf), "node_index=%d", node_index++);
                decode_arst(&temp, NULL, NULL, node_index_buf, 0);
                save_node_status(current, &temp);
            }

            if ((current = determine_node_from_str(str, parent, current)) == NULL)
                break;
            else
                continue;
        }

        // Skip status lines until we have a valid current node.
        if(current == NULL)
            continue;

        /* process the gpu status information separately */
        if (!strcmp(CRAY_GPU_STATUS_START, str))
        {
            // Advances i past the GPU block; str is refreshed afterwards.
            rc = process_gpu_status(current, i,status_info.end());
            str = i->c_str();
            continue;
        }
        else if (!strncmp(reservation_id, str, strlen(reservation_id)))
        {
            const char *just_rsv_id = str + strlen(reservation_id);

            // Only process each reservation id once per status update.
            if (get_value_hash(rsv_ht, just_rsv_id) == -1)
            {
                add_hash(rsv_ht, 1, strdup(just_rsv_id));

                /* sub-functions will attempt to lock a job, so we must unlock the
                 * reporter node */
                unlock_node(parent, __func__, NULL, LOGLEVEL);
                process_reservation_id(current, str);

                // Remember the current node by name so we can re-find it after
                // releasing its lock.
                current_node_id = strdup(current->nd_name);
                unlock_node(current, __func__, NULL, LOGLEVEL);

                /* re-lock the parent */
                if ((parent = find_nodebyname(nd_name)) == NULL)
                {
                    /* reporter node disappeared - this shouldn't be possible */
                    log_err(PBSE_UNKNODE, __func__, "Alps reporter node disappeared while recording a reservation");

                    free_arst(&temp);
                    free_all_keys(rsv_ht);
                    free_hash(rsv_ht);
                    free(current_node_id);

                    return(PBSE_NONE);
                }

                if ((current = find_node_in_allnodes(&parent->alps_subnodes, current_node_id)) == NULL)
                {
                    /* current node disappeared, this shouldn't be possible either */
                    unlock_node(parent, __func__, NULL, LOGLEVEL);
                    snprintf(log_buf, sizeof(log_buf), "Current node '%s' disappeared while recording a reservation", current_node_id);
                    log_err(PBSE_UNKNODE, __func__, log_buf);

                    free_arst(&temp);
                    free_all_keys(rsv_ht);
                    free_hash(rsv_ht);
                    free(current_node_id);

                    return(PBSE_NONE);
                }

                free(current_node_id);
                current_node_id = NULL;
            }
        }
        /* save this as is to the status strings */
        else if ((rc = decode_arst(&temp, NULL, NULL, str, 0)) != PBSE_NONE)
        {
            free_arst(&temp);
            free_all_keys(rsv_ht);
            free_hash(rsv_ht);

            return(rc);
        }

        /* perform any special processing */
        if (!strncmp(str, cproc_eq, ac_cproc_eq_len))
        {
            set_ncpus(current, parent, str);
        }
        else if (!strncmp(str, state, strlen(state)))
        {
            set_state(current, str);
        }
    } /* END processing the status update */

    // Flush the status of the last node seen, and release its lock.
    if (current != NULL)
    {
        snprintf(node_index_buf, sizeof(node_index_buf), "node_index=%d", node_index++);
        decode_arst(&temp, NULL, NULL, node_index_buf, 0);
        save_node_status(current, &temp);
        unlock_node(current, __func__, NULL, LOGLEVEL);
    }

    unlock_node(parent, __func__, NULL, LOGLEVEL);

    free_all_keys(rsv_ht);
    free_hash(rsv_ht);

    return(PBSE_NONE);
} /* END process_alps_status() */