// Merge the regression model's status into 'status', tagging it with the
// storage backend type. Existing keys in 'status' are left untouched
// (map::insert never overwrites).
void regression_serv::get_status(status_t& status) const {
  status_t model_status;
  gresser_.get_model()->get_status(model_status);
  model_status["storage"] = gresser_.get_model()->type();
  for (status_t::const_iterator it = model_status.begin();
       it != model_status.end(); ++it) {
    status.insert(*it);
  }
}
// Publish the recommender's operation counters (as decimal strings) into
// 'status'. Keys already present in 'status' keep their old values, because
// map::insert skips duplicates.
void recommender_serv::get_status(status_t& status) const {
  status_t counters;
  counters["clear_row_cnt"] = lexical_cast<string>(clear_row_cnt_);
  counters["update_row_cnt"] = lexical_cast<string>(update_row_cnt_);
  for (status_t::const_iterator it = counters.begin();
       it != counters.end(); ++it) {
    status.insert(*it);
  }
}
// Hand one produced value to the consuming side, then block this (producer)
// thread until the consumer either requests the next value or interrupts us.
// NOTE(review): relies on m_status bit flags (is_incrementing/is_interrupted)
// whose setters live outside this view — semantics inferred, confirm there.
void yield(result_ref_t result)
// in the same thread as 'work()'.
{
  boost::mutex::scoped_lock lock(m_mutex);
  // 'result' is alive until next increment,
  // as far as 'value' can go across thread-boundary.
  // Publish the address of the yielded value while the lock is held.
  m_presult = boost::addressof(result);
  m_status.reset(is_incrementing::value);
  // Wake the consumer waiting for a fresh value.
  m_cond.notify_one();
  // Wait until the consumer signals "give me the next one" or "stop".
  // Loop guards against spurious wakeups.
  while (!m_status.test(is_incrementing::value) && !m_status.test(is_interrupted::value))
    m_cond.wait(lock);
  if (m_status.test(is_interrupted::value)) {
    // Unwind the producer via an exception; caught in work().
    exit_exception ex;
    boost::throw_exception(ex);
  }
}
// Producer-thread entry point: run the wrapped routine m_rou, feeding it a
// callback that forwards each produced value to yield(). When the routine
// finishes (or is interrupted via exit_exception thrown inside yield()),
// mark the stream as ended and wake any waiting consumer.
void work()
{
  try {
    // Bind yield() as the routine's output sink; pstade::perfect forwards
    // the argument through the lambda placeholder without copying.
    m_rou(pstade::perfect<void>( boost::lambda::bind(&self_t::yield, this, boost::lambda::_1) ));
  } catch (exit_exception const&) {
    // Deliberate: interruption requested by the consumer — not an error.
  }
  // Signal end-of-stream under the lock so the consumer observes it safely.
  boost::mutex::scoped_lock lock(m_mutex);
  m_status.set(is_end::value);
  m_cond.notify_one();
}
// Report the anomaly model's storage backend type. Inserting the single pair
// directly is equivalent to the temp-map-then-merge pattern used elsewhere,
// since map::insert leaves an existing "storage" entry untouched either way.
void anomaly_serv::get_status(status_t& status) const {
  status.insert(std::make_pair("storage", anomaly_->get_model()->type()));
}
// Collect the regression driver's status entries and merge them into
// 'status' without overwriting keys the caller already filled in.
void regression_serv::get_status(status_t& status) const {
  status_t sub_status;
  regression_->get_status(sub_status);
  for (status_t::const_iterator it = sub_status.begin();
       it != sub_status.end(); ++it) {
    status.insert(*it);
  }
}
// Expose the number of stored key/value entries under the "size" key,
// formatted as a decimal string.
void kvs_serv::get_status(status_t& status) const {
  std::ostringstream size_text;
  size_text << data_.size();
  status.insert(std::make_pair("size", size_text.str()));
}
// Report the stat model's storage backend type. Built via a local map and a
// range insert; net effect is identical to inserting the pair directly,
// because map::insert never replaces an existing "storage" entry.
void stat_serv::get_status(status_t& status) const {
  status_t local;
  local["storage"] = stat_->get_model()->type();
  status.insert(local.begin(), local.end());
}