threads::thread_state_ex_enum suspend(
    boost::posix_time::ptime const& at_time, char const* description,
    error_code& ec)
{
    // handle interruption, if needed
    this_thread::interruption_point();

    // schedule a thread waking us up at_time
    threads::thread_self& self = threads::get_self();
    threads::thread_id_type id = self.get_thread_id();
    threads::set_thread_state(id, at_time, threads::pending,
        threads::wait_signaled, threads::thread_priority_critical, ec);
    if (ec) return threads::wait_unknown;

    // let the thread manager do other things while waiting
    threads::thread_state_ex_enum statex = threads::wait_unknown;

    {
        // verify that there are no more registered locks for this OS-thread
        util::verify_no_locks();

        // suspend the HPX-thread
        detail::reset_lco_description desc(id, description, ec);
#if HPX_THREAD_MAINTAIN_BACKTRACE_ON_SUSPENSION
        detail::reset_backtrace bt(id, ec);
#endif
        statex = self.yield(threads::suspended);
    }

    // handle interrupt and abort
    this_thread::interruption_point();
    if (statex == threads::wait_abort) {
        hpx::util::osstream strm;
        strm << "thread(" << id << ", "
             << threads::get_thread_description(id)
             << ") aborted (yield returned wait_abort)";
        HPX_THROWS_IF(ec, yield_aborted, description,
            hpx::util::osstream_get_string(strm));
    }

    if (&ec != &throws)
        ec = make_success_code();

    return statex;
}
std::size_t set_thread_data(thread_id_type const& id, std::size_t d,
    error_code& ec)
{
    hpx::applier::applier* app = hpx::applier::get_applier_ptr();
    if (NULL == app) {
        HPX_THROWS_IF(ec, invalid_status,
            "hpx::threads::set_thread_data",
            "global applier object is not accessible");
        return 0;
    }

    if (&ec != &throws)
        ec = make_success_code();

    return app->get_thread_manager().set_thread_data(id, d, ec);
}
char const* set_thread_lco_description(thread_id_type const& id,
    char const* desc, error_code& ec)
{
    hpx::applier::applier* app = hpx::applier::get_applier_ptr();
    if (NULL == app) {
        HPX_THROWS_IF(ec, invalid_status,
            "hpx::threads::set_thread_lco_description",
            "global applier object is not accessible");
        return NULL;
    }

    if (&ec != &throws)
        ec = make_success_code();

    return app->get_thread_manager().set_lco_description(id, desc);
}
util::backtrace const* get_thread_backtrace(thread_id_type const& id,
    error_code& ec)
#endif
{
    hpx::applier::applier* app = hpx::applier::get_applier_ptr();
    if (NULL == app) {
        HPX_THROWS_IF(ec, invalid_status,
            "hpx::threads::get_thread_backtrace",
            "global applier object is not accessible");
        return NULL;
    }

    if (&ec != &throws)
        ec = make_success_code();

    return app->get_thread_manager().get_backtrace(id);
}
/// The function \a suspend will return control to the thread manager
/// (suspends the current thread). It sets the new state of this thread
/// to the thread state passed as the parameter.
///
/// If the suspension was aborted, this function will throw a
/// \a yield_aborted exception.
threads::thread_state_ex_enum suspend(threads::thread_state_enum state,
    char const* description, error_code& ec)
{
    // handle interruption, if needed
    this_thread::interruption_point();

    // let the thread manager do other things while waiting
    threads::thread_self& self = threads::get_self();
    threads::thread_state_ex_enum statex = threads::wait_unknown;

    {
        // verify that there are no more registered locks for this OS-thread
#if HPX_HAVE_VERIFY_LOCKS
        util::verify_no_locks();
#endif
#if HPX_THREAD_MAINTAIN_DESCRIPTION
        threads::thread_id_type id = self.get_thread_id();
        detail::reset_lco_description desc(id, description, ec);
#endif
#if HPX_THREAD_MAINTAIN_BACKTRACE_ON_SUSPENSION
        detail::reset_backtrace bt(id, ec);
#endif
        // suspend the HPX-thread
        statex = self.yield(state);
    }

    // handle interruption, if needed
    this_thread::interruption_point();

    // handle interrupt and abort
    if (statex == threads::wait_abort) {
        threads::thread_id_type id = self.get_thread_id();
        hpx::util::osstream strm;
        strm << "thread(" << id << ", "
             << threads::get_thread_description(id)
             << ") aborted (yield returned wait_abort)";
        HPX_THROWS_IF(ec, yield_aborted, description,
            hpx::util::osstream_get_string(strm));
    }

    if (&ec != &throws)
        ec = make_success_code();

    return statex;
}
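///////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch, not part of the sources above: a caller yields
// the current HPX-thread through the suspend() overload shown above and
// inspects the wake-up reason. The enclosing function wait_until_signalled()
// is hypothetical, and the sketch assumes suspend() is reachable as
// hpx::this_thread::suspend.
void wait_until_signalled(hpx::error_code& ec)
{
    // give up the worker thread; we stay suspended until another thread sets
    // our state back to pending (for example via threads::set_thread_state)
    hpx::threads::thread_state_ex_enum statex =
        hpx::this_thread::suspend(hpx::threads::suspended,
            "wait_until_signalled", ec);
    if (ec) return;

    if (statex == hpx::threads::wait_signaled) {
        // we were woken up explicitly; continue with the actual work
    }
}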
void hwloc_topology::set_thread_affinity_mask(
    mask_type mask
  , error_code& ec
    ) const
{ // {{{
    hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();

    for (std::size_t i = 0; i < sizeof(std::size_t) * CHAR_BIT; ++i)
    {
        if (mask & (static_cast<std::size_t>(1) << i))
        {
            hwloc_bitmap_set(cpuset, static_cast<unsigned int>(i));
        }
    }

    {
        scoped_lock lk(topo_mtx);
        if (hwloc_set_cpubind(topo, cpuset,
                HWLOC_CPUBIND_STRICT | HWLOC_CPUBIND_THREAD))
        {
            // Strict binding not supported or failed, try weak binding.
            if (hwloc_set_cpubind(topo, cpuset, HWLOC_CPUBIND_THREAD))
            {
                hwloc_bitmap_free(cpuset);

                HPX_THROWS_IF(ec, kernel_error
                  , "hpx::threads::hwloc_topology::set_thread_affinity_mask"
                  , boost::str(boost::format(
                        "failed to set thread %x affinity mask") % mask));

                if (ec)
                    return;
            }
        }
    }

#if defined(__linux) || defined(linux) || defined(__linux__) || defined(__FreeBSD__)
    sleep(0);   // Allow the OS to pick up the change.
#endif

    hwloc_bitmap_free(cpuset);

    if (&ec != &throws)
        ec = make_success_code();
} // }}}
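///////////////////////////////////////////////////////////////////////////
// Illustrative sketch only: building a mask that selects processing units 0
// and 2 and applying it through set_thread_affinity_mask() above. It assumes
// mask_type is the integral std::size_t-based mask used by the implementation
// above; the function name and the topology reference are made up.
void bind_to_pus_0_and_2(hpx::threads::hwloc_topology const& topo)
{
    hpx::threads::mask_type mask = 0;
    mask |= static_cast<std::size_t>(1) << 0;   // PU 0
    mask |= static_cast<std::size_t>(1) << 2;   // PU 2

    hpx::error_code ec(hpx::lightweight);
    topo.set_thread_affinity_mask(mask, ec);
    if (ec) {
        // binding failed; ec carries the hwloc error information
    }
}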
boost::uint32_t get_locality_id(error_code& ec = throws) const
{
    boost::uint32_t prefix;

    // Don't let the first attempt throw.
    error_code first_try(lightweight);
    prefix = get_data<subtype_gid_gid_prefix, 2>(first_try);

    // If the first try failed, check again.
    if (first_try)
        prefix = get_data<subtype_prefix, 0>(ec);
    else if (&ec != &throws)
        ec = make_success_code();

    return prefix;
}
void set_thread_affinity_mask(
    boost::thread& thrd
  , mask_type mask
  , error_code& ec = throws
    ) const
{ // {{{
    if (!SetThreadAffinityMask(thrd.native_handle(), DWORD_PTR(mask)))
    {
        HPX_THROWS_IF(ec, kernel_error
          , "hpx::threads::windows_topology::set_thread_affinity_mask"
          , boost::str(boost::format(
                "failed to set thread %1% affinity mask") % mask));
    }
    else if (&ec != &throws)
        ec = make_success_code();
} // }}}
// Schedule the specified function for execution in this executor.
// Depending on the subclass implementation, this may block in some
// situations.
void generic_thread_pool_executor::add(closure_type && f,
    char const* desc, threads::thread_state_enum initial_state,
    bool run_now, threads::thread_stacksize stacksize, error_code& ec)
{
    // create a new thread
    thread_init_data data(util::bind(
        util::one_shot(&generic_thread_pool_executor::thread_function_nullary),
        std::move(f)), desc);

    data.stacksize = threads::get_stack_size(stacksize);

    threads::detail::create_thread(scheduler_base_, data, initial_state,
        run_now, ec);
    if (ec) return;

    if (&ec != &throws)
        ec = make_success_code();
}
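///////////////////////////////////////////////////////////////////////////
// A hedged usage sketch, not from the sources above: scheduling a small
// nullary closure on a generic_thread_pool_executor. The enclosing function,
// the executor reference, the lambda body, and the namespace qualification of
// the executor type are assumptions for illustration only.
void schedule_work(hpx::threads::executors::generic_thread_pool_executor& exec)
{
    hpx::error_code ec(hpx::lightweight);
    exec.add(
        []() { /* work to run on the underlying thread pool */ },
        "schedule_work",                            // description for debugging
        hpx::threads::pending,                      // make the thread runnable
        true,                                       // run_now
        hpx::threads::thread_stacksize_default,     // default stack size
        ec);
    if (ec) {
        // scheduling failed; ec holds the error information
    }
}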
thread_state set_thread_state(thread_id_type id, thread_state_enum state,
    thread_state_ex_enum stateex, thread_priority priority, error_code& ec)
{
    hpx::applier::applier* app = hpx::applier::get_applier_ptr();
    if (NULL == app) {
        HPX_THROWS_IF(ec, invalid_status,
            "hpx::threads::set_thread_state",
            "global applier object is not accessible");
        return thread_state(unknown);
    }

    if (&ec != &throws)
        ec = make_success_code();

    return app->get_thread_manager().set_state(id, state, stateex,
        priority, ec);
}
thread_id_type set_thread_state(thread_id_type const& id,
    boost::posix_time::time_duration const& after, thread_state_enum state,
    thread_state_ex_enum stateex, thread_priority priority, error_code& ec)
{
    hpx::applier::applier* app = hpx::applier::get_applier_ptr();
    if (NULL == app) {
        HPX_THROWS_IF(ec, invalid_status,
            "hpx::threads::set_thread_state",
            "global applier object is not accessible");
        return invalid_thread_id;
    }

    if (&ec != &throws)
        ec = make_success_code();

    return app->get_thread_manager().set_state(after, id, state, stateex,
        priority, ec);
}
response component_namespace::bind_name(
    request const& req
  , error_code& ec
    )
{ // {{{ bind_name implementation
    // parameters
    std::string key = req.get_name();

    std::unique_lock<mutex_type> l(mutex_);

    component_id_table_type::left_map::iterator
        it = component_ids_.left.find(key)
      , end = component_ids_.left.end();

    // If the name is not in the table, register it (this is only done so
    // we can implement a backwards compatible get_component_id).
    if (it == end)
    {
        if (HPX_UNLIKELY(!util::insert_checked(component_ids_.left.insert(
                std::make_pair(key, type_counter)), it)))
        {
            l.unlock();

            HPX_THROWS_IF(ec, lock_error
              , "component_namespace::bind_name"
              , "component id table insertion failed due to a locking "
                "error or memory corruption");
            return response();
        }

        // If the insertion succeeded, we need to increment the type counter.
        ++type_counter;
    }

    LAGAS_(info) << (boost::format(
        "component_namespace::bind_name, key(%1%), ctype(%2%)")
        % key % it->second);

    if (&ec != &throws)
        ec = make_success_code();

    return response(component_ns_bind_name, it->second);
} // }}}
///////////////////////////////////////////////////////////////////////////
// sanity checks
void mappings_sanity_checks(mapping_type& m, std::size_t size,
    bounds_type const& b, error_code& ec)
{
    if (m.size() != 3) {
        HPX_THROWS_IF(ec, bad_parameter, "decode_mapping",
            "bad size of mappings specification array");
        return;
    }

    std::size_t count_ranges = 0;
    for (std::size_t i = 0; i != 3; ++i)
    {
        bounds_type bounds = extract_bounds(m[i], size, ec);
        if (ec) return;

        if (bounds.size() > 1)
        {
            ++count_ranges;

            // FIXME: replace this with proper counting of processing units
            // specified by the affinity desc
            //
            // if (b.begin() != b.end()) {
            //     // threads have bounds ranges as well
            //     if (b.end() - b.begin() > bounds.second - bounds.first) {
            //         HPX_THROWS_IF(ec, bad_parameter, "decode_mapping",
            //             boost::str(boost::format("the thread index range "
            //                 "is larger than the index range specified for "
            //                 "the %s node") % spec_type::type_name(
            //                     m[i].type_)));
            //         return;
            //     }
            // }
        }
    }

    if (count_ranges > 1) {
        HPX_THROWS_IF(ec, bad_parameter, "decode_mapping",
            "index ranges can be specified only for one node type "
            "(socket/numanode, core, or pu)");
        return;
    }

    if (&ec != &throws)
        ec = make_success_code();
}
std::size_t get_numa_node_number(
    std::size_t num_thread
  , error_code& ec = throws
    ) const
{ // {{{
    if (num_thread < numa_node_numbers_.size())
    {
        if (&ec != &throws)
            ec = make_success_code();

        return numa_node_numbers_[num_thread];
    }

    HPX_THROWS_IF(ec, bad_parameter
      , "hpx::threads::windows_topology::get_numa_node_number"
      , boost::str(boost::format(
            "thread number %1% is out of range") % num_thread));
    return std::size_t(-1);
} // }}}
void thread_pool_os_executor<Scheduler>::add(closure_type && f,
    char const* desc, threads::thread_state_enum initial_state,
    bool run_now, threads::thread_stacksize stacksize, error_code& ec)
{
    // create a new thread
    thread_init_data data(util::bind(
        util::one_shot(&thread_pool_os_executor::thread_function_nullary),
        std::move(f)), desc);

    data.stacksize = threads::get_stack_size(stacksize);

    threads::thread_id_type id = threads::invalid_thread_id;
    pool_.create_thread(data, id, initial_state, run_now, ec);
    if (ec) return;

    HPX_ASSERT(invalid_thread_id != id || !run_now);

    if (&ec != &throws)
        ec = make_success_code();
}
future_data_base<traits::detail::future_data_void>::state
future_data_base<traits::detail::future_data_void>::wait(error_code& ec)
{
    // block if this entry is empty
    state s = state_.load(std::memory_order_acquire);
    if (s == empty)
    {
        std::unique_lock<mutex_type> l(mtx_);
        s = state_.load(std::memory_order_relaxed);
        if (s == empty)
        {
            cond_.wait(l, "future_data_base::wait", ec);
            if (ec) return s;
        }
    }

    if (&ec != &throws)
        ec = make_success_code();
    return s;
}
/// \brief Create a full name of a counter type from the contents of the
///        given \a counter_type_path_elements instance.
counter_status get_counter_type_name(counter_type_path_elements const& path,
    std::string& result, error_code& ec)
{
    if (path.objectname_.empty()) {
        HPX_THROWS_IF(ec, bad_parameter, "get_counter_type_name",
            "empty counter object name");
        return status_invalid_data;
    }

    result = "/";
    result += path.objectname_;

    if (!path.countername_.empty()) {
        result += "/";
        result += path.countername_;
    }

    if (&ec != &throws)
        ec = make_success_code();
    return status_valid_data;
}
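///////////////////////////////////////////////////////////////////////////
// Illustrative sketch only: composing a counter type name from a
// counter_type_path_elements instance via the function above. The chosen
// object and counter names and the enclosing function are made up for the
// example.
void counter_type_name_example()
{
    hpx::performance_counters::counter_type_path_elements p;
    p.objectname_  = "threads";
    p.countername_ = "count/cumulative";

    std::string name;
    hpx::error_code ec(hpx::lightweight);
    hpx::performance_counters::counter_status status =
        hpx::performance_counters::get_counter_type_name(p, name, ec);

    // on success, status is status_valid_data and
    // name == "/threads/count/cumulative"
    (void) status;
}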
/// Default discoverer function for AGAS performance counters; to be
/// registered with the counter types. It is suitable to be used for all
/// counters following the naming scheme:
///
///   /<objectname>{locality#0/total}/<instancename>
///
bool locality0_counter_discoverer(counter_info const& info,
    HPX_STD_FUNCTION<discover_counter_func> const& f,
    discover_counters_mode mode, error_code& ec)
{
    performance_counters::counter_info i = info;

    // compose the counter name templates
    performance_counters::counter_path_elements p;
    performance_counters::counter_status status =
        get_counter_path_elements(info.fullname_, p, ec);
    if (!status_is_valid(status)) return false;

    if (mode == discover_counters_minimal ||
        p.parentinstancename_.empty() || p.instancename_.empty())
    {
        if (p.parentinstancename_.empty())
        {
            p.parentinstancename_ = "locality";
            p.parentinstanceindex_ = 0;
        }

        if (p.instancename_.empty())
        {
            p.instancename_ = "total";
            p.instanceindex_ = -1;
        }

        status = get_counter_name(p, i.fullname_, ec);
        if (!status_is_valid(status) || !f(i, ec) || ec)
            return false;
    }
    else if (!f(i, ec) || ec) {
        return false;
    }

    if (&ec != &throws)
        ec = make_success_code();

    return true;
}
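///////////////////////////////////////////////////////////////////////////
// Sketch of a callback a discoverer like the one above could be paired with;
// the function print_counter is a made-up example (and assumes <iostream> is
// available). When the queried counter name omits the instance part, the
// discoverer fills in the defaults, so the callback sees the full name
// "/<objectname>{locality#0/total}/<instancename>".
bool print_counter(hpx::performance_counters::counter_info const& i,
    hpx::error_code& ec)
{
    std::cout << i.fullname_ << std::endl;
    return true;    // continue discovering further counters
}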
mask_type get_thread_affinity_mask(
    std::size_t num_thread
  , bool numa_sensitive
  , error_code& ec = throws
    ) const
{ // {{{
    if (num_thread < thread_affinity_masks_.size())
    {
        if (&ec != &throws)
            ec = make_success_code();

        return numa_sensitive ? ns_thread_affinity_masks_[num_thread]
                              : thread_affinity_masks_[num_thread];
    }

    HPX_THROWS_IF(ec, bad_parameter
      , "hpx::threads::windows_topology::get_thread_affinity_mask"
      , boost::str(boost::format(
            "thread number %1% is out of range") % num_thread));
    return 0;
} // }}}
void this_thread_executor<Scheduler>::add_at(
    boost::chrono::steady_clock::time_point const& abs_time,
    closure_type && f, char const* desc,
    threads::thread_stacksize stacksize, error_code& ec)
{
    HPX_ASSERT(std::size_t(-1) != thread_num_);

    // if the scheduler was stopped, we need to restart it
    state expected = state_stopped;
    scheduler_.get_state(0).compare_exchange_strong(expected, state_starting);

    // create a new suspended thread
    thread_init_data data(util::bind(
        util::one_shot(&this_thread_executor::thread_function_nullary),
        this, std::move(f)), desc);

    data.stacksize = threads::get_stack_size(stacksize);

    threads::thread_id_type id = threads::invalid_thread_id;
    threads::detail::create_thread(    //-V601
        &scheduler_, data, id, suspended, true, ec);
    if (ec) return;
    HPX_ASSERT(invalid_thread_id != id);    // would throw otherwise

    // update statistics
    ++tasks_scheduled_;

    // now schedule new thread for execution
    threads::detail::set_thread_state_timed(scheduler_, abs_time, id, ec);
    if (ec) {
        --tasks_scheduled_;
        return;
    }

    // execute scheduler directly, if necessary
    run();

    if (&ec != &throws)
        ec = make_success_code();
}
std::size_t hwloc_topology::get_core_number(
    std::size_t num_thread
  , error_code& ec
    ) const
{ // {{{
    std::size_t num_pu = num_thread % num_of_pus_;

    if (num_pu < core_numbers_.size())
    {
        if (&ec != &throws)
            ec = make_success_code();

        return core_numbers_[num_pu];
    }

    HPX_THROWS_IF(ec, bad_parameter
      , "hpx::threads::hwloc_topology::get_core_number"
      , boost::str(boost::format(
            "thread number %1% is out of range") % num_thread));
    return std::size_t(-1);
} // }}}
response component_namespace::get_component_type_name(
    request const& req
  , error_code& ec
    )
{ // {{{ get_component_type_name implementation
    components::component_type t = req.get_component_type();

    std::lock_guard<mutex_type> l(mutex_);

    std::string result;

    if (t == components::component_invalid)
    {
        result = "component_invalid";
    }
    else if (components::get_derived_type(t) == 0)
    {
        result = get_component_name(component_ids_.right, t);
    }
    else if (components::get_derived_type(t) != 0)
    {
        result = get_component_name(component_ids_.right,
            components::get_derived_type(t));
        result += "/";
        result += get_component_name(component_ids_.right,
            components::get_base_type(t));
    }

    if (result.empty())
    {
        LAGAS_(info) << (boost::format(
            "component_namespace::get_component_typename, "
            "key(%1%/%2%), response(no_success)")
            % int(components::get_derived_type(t))
            % int(components::get_base_type(t)));

        if (&ec != &throws)
            ec = make_success_code();

        return response(component_ns_get_component_type_name, no_success);
    }
void thread_pool_executor<Scheduler>::add(closure_type && f,
    char const* desc, threads::thread_state_enum initial_state,
    bool run_now, threads::thread_stacksize stacksize, error_code& ec)
{
    // create a new thread
    thread_init_data data(util::bind(
        util::one_shot(&thread_pool_executor::thread_function_nullary),
        this, std::move(f)), desc);

    data.stacksize = threads::get_stack_size(stacksize);

    // update statistics
    ++tasks_scheduled_;

    threads::detail::create_thread(&scheduler_, data, initial_state,
        run_now, ec);
    if (ec) {
        --tasks_scheduled_;
        return;
    }

    if (&ec != &throws)
        ec = make_success_code();
}
// TODO: catch exceptions
response component_namespace::iterate_types(
    request const& req
  , error_code& ec
    )
{ // {{{ iterate implementation
    iterate_types_function_type f = req.get_iterate_types_function();

    std::lock_guard<mutex_type> l(mutex_);

    for (component_id_table_type::left_map::iterator it =
            component_ids_.left.begin(),
            end = component_ids_.left.end();
         it != end; ++it)
    {
        f(it->first, it->second);
    }

    LAGAS_(info) << "component_namespace::iterate_types";

    if (&ec != &throws)
        ec = make_success_code();

    return response(component_ns_iterate_types);
} // }}}
// TODO: catch exceptions
response symbol_namespace::iterate(
    request const& req
  , error_code& ec
    )
{ // {{{ iterate implementation
    iterate_names_function_type f = req.get_iterate_names_function();

    mutex_type::scoped_lock l(mutex_);

    for (gid_table_type::iterator it = gids_.begin(), end = gids_.end();
         it != end; ++it)
    {
        f(it->first, it->second);
    }

    LAGAS_(info) << "symbol_namespace::iterate";

    if (&ec != &throws)
        ec = make_success_code();

    return response(symbol_ns_iterate_names);
} // }}}
mask_type hwloc_topology::get_core_affinity_mask(
    std::size_t num_thread
  , bool numa_sensitive
  , error_code& ec
    ) const
{
    std::size_t num_pu = num_thread % num_of_pus_;

    if (num_pu < core_affinity_masks_.size())
    {
        if (&ec != &throws)
            ec = make_success_code();

        return core_affinity_masks_[num_pu];
    }

    HPX_THROWS_IF(ec, bad_parameter
      , "hpx::threads::hwloc_topology::get_core_affinity_mask"
      , boost::str(boost::format(
            "thread number %1% is out of range") % num_thread));
    return 0;
}
///////////////////////////////////////////////////////////////////////////
// sanity checks
void mappings_sanity_checks(full_mapping_type& fmt, std::size_t size,
    bounds_type const& b, error_code& ec)
{
    mapping_type& m = fmt.second;
    if (m.size() != 3) {
        HPX_THROWS_IF(ec, bad_parameter, "decode_mapping",
            "bad size of mappings specification array");
        return;
    }

    if (b.begin() == b.end()) {
        HPX_THROWS_IF(ec, bad_parameter, "decode_mapping",
            boost::str(boost::format(
                "no %1% mapping bounds are specified")
                % spec_type::type_name(fmt.first.type_)));
        return;
    }

    if (&ec != &throws)
        ec = make_success_code();
}
future_status future_data_base<traits::detail::future_data_void>::
    wait_until(util::steady_clock::time_point const& abs_time, error_code& ec)
{
    // block if this entry is empty
    if (state_.load(std::memory_order_acquire) == empty)
    {
        std::unique_lock<mutex_type> l(mtx_);
        if (state_.load(std::memory_order_relaxed) == empty)
        {
            threads::thread_state_ex_enum const reason =
                cond_.wait_until(l, abs_time,
                    "future_data_base::wait_until", ec);
            if (ec) return future_status::uninitialized;

            if (reason == threads::wait_timeout)
                return future_status::timeout;
        }
    }

    if (&ec != &throws)
        ec = make_success_code();
    return future_status::ready;    //-V110
}
naming::gid_type create(std::size_t count = 1, error_code& ec = throws)
{
    Component* c = static_cast<Component*>(Component::create(count));

    naming::gid_type gid = c->get_base_gid();
    if (gid)
    {
        // everything is ok, return the new id
        if (&ec != &throws)
            ec = make_success_code();
        return gid;
    }

    Component::destroy(c, count);

    hpx::util::osstream strm;
    strm << "global id " << gid << " is already bound to a different "
            "component instance";
    HPX_THROWS_IF(ec, hpx::duplicate_component_address,
        "create<Component>",
        hpx::util::osstream_get_string(strm));

    return naming::invalid_gid;
}
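///////////////////////////////////////////////////////////////////////////
// Illustrative sketch only: creating a block of component instances through
// the factory function above and checking the duplicate-address error path.
// server::my_component, the unqualified call to create<>, and the enclosing
// function are made-up assumptions for this example.
void create_example()
{
    hpx::error_code ec(hpx::lightweight);
    hpx::naming::gid_type gid = create<server::my_component>(5, ec);
    if (ec || !gid) {
        // creation failed, e.g. the id was already bound to a different
        // component instance
        return;
    }
    // gid now refers to the base id of the five newly created instances
}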
// Request an initial resource allocation
std::size_t resource_manager::initial_allocation(
    detail::manage_executor* proxy, error_code& ec)
{
    if (nullptr == proxy) {
        HPX_THROWS_IF(ec, bad_parameter,
            "resource_manager::init_allocation",
            "manage_executor pointer is a nullptr");
        return std::size_t(-1);
    }

    // ask executor for its policies
    error_code ec1(lightweight);
    std::size_t min_punits =
        proxy->get_policy_element(detail::min_concurrency, ec1);
    if (ec1) min_punits = 1;
    std::size_t max_punits =
        proxy->get_policy_element(detail::max_concurrency, ec1);
    if (ec1) max_punits = get_os_thread_count();

    // lock the resource manager from this point on
    std::lock_guard<mutex_type> l(mtx_);

    // allocate initial resources for the given executor
    std::vector<std::pair<std::size_t, std::size_t> > cores =
        allocate_virt_cores(proxy, min_punits, max_punits, ec);
    if (ec) return std::size_t(-1);

    // attach the given proxy to this resource manager
    std::size_t cookie = ++next_cookie_;
    proxies_.insert(proxies_map_type::value_type(
        cookie, proxy_data(proxy, std::move(cores))));

    if (&ec != &throws)
        ec = make_success_code();
    return cookie;
}