/* * Free function. Saves the current context in @p from
 * and restores the context in @p to. On windows the from
 * parameter is ignored. The current context is saved on the
 * current fiber.
 * Note that if the current thread is not a fiber, it will be
 * converted to fiber on the fly on call and unconverted before
 * return. This is expensive. The user should convert the
 * current thread to a fiber once on thread creation for better performance.
 * Note that we can't leave the thread unconverted on return or else we
 * will leak resources on thread destruction. Do the right thing by
 * default.
 */
friend void swap_context(fibers_context_impl_base& from,
    const fibers_context_impl_base& to, default_hint)
{
    if (!is_fiber())
    {
        // The calling thread is a plain thread: convert it to a fiber
        // for the duration of this call so it can be switched away from.
        HPX_ASSERT(from.m_ctx == 0);
        from.m_ctx = ConvertThreadToFiber(0);
        HPX_ASSERT(from.m_ctx != 0);

#if HPX_HAVE_SWAP_CONTEXT_EMULATION != 0
        switch_to_fiber(to.m_ctx);
#else
        SwitchToFiber(to.m_ctx);
#endif
        // Execution resumed here: convert back so that thread
        // destruction does not leak the fiber resources (see the
        // comment block above).
        BOOL result = ConvertFiberToThread();
        HPX_ASSERT(result);
        HPX_UNUSED(result);
        from.m_ctx = 0;
    }
    else
    {
        // Already running on a fiber. A zero from.m_ctx means no context
        // was captured yet for 'from'; record the currently running fiber
        // so there is something to switch back to, and clear it again
        // once we resume.
        bool call_from_main = from.m_ctx == 0;
        if (call_from_main)
            from.m_ctx = GetCurrentFiber();

#if HPX_HAVE_SWAP_CONTEXT_EMULATION != 0
        switch_to_fiber(to.m_ctx);
#else
        SwitchToFiber(to.m_ctx);
#endif
        if (call_from_main)
            from.m_ctx = 0;
    }
}
// Creates a context prepared to execute 'cb' through the trampoline when
// first switched to. A stack_size of -1 selects the default stack size;
// the stack itself is freshly allocated via alloc_stack().
explicit ucontext_context_impl(Functor & cb, std::ptrdiff_t stack_size)
  : m_stack_size(stack_size == -1 ?
        (std::ptrdiff_t)default_stack_size : stack_size),
    m_stack(alloc_stack(m_stack_size)),
    cb_(&cb)
{
    HPX_ASSERT(m_stack);
    funp_ = &trampoline<Functor>;

    int error = HPX_COROUTINE_MAKE_CONTEXT(
        &m_ctx, m_stack, m_stack_size, funp_, cb_, nullptr);
    HPX_UNUSED(error);
    HPX_ASSERT(error == 0);

#if defined(HPX_HAVE_THREAD_STACKOVERFLOW_DETECTION)
    // concept inspired by the following links:
    //
    // https://rethinkdb.com/blog/handling-stack-overflow-on-custom-stacks/
    // http://www.evanjones.ca/software/threading.html
    //
    // Install a SIGSEGV handler that runs on its own (alternate) signal
    // stack, so an overflow of the coroutine stack can still be reported
    // instead of faulting again while handling the fault.
    segv_stack.ss_sp = valloc(SEGV_STACK_SIZE);
    segv_stack.ss_flags = 0;
    segv_stack.ss_size = SEGV_STACK_SIZE;

    std::memset(&action, '\0', sizeof(action));
    action.sa_flags = SA_SIGINFO|SA_ONSTACK; //SA_STACK
    action.sa_sigaction = &ucontext_context_impl::sigsegv_handler;

    sigaltstack(&segv_stack, nullptr);
    sigfillset(&action.sa_mask);
    sigaction(SIGSEGV, &action, nullptr);
#endif
}
/*
 * Free function. Captures the currently running context into @p from and
 * resumes execution of the context stored in @p to.
 */
friend void swap_context(ucontext_context_impl_base& from,
    const ucontext_context_impl_base& to, default_hint)
{
    int const err = HPX_COROUTINE_SWAP_CONTEXT(&from.m_ctx, &to.m_ctx);
    HPX_ASSERT(err == 0);
    HPX_UNUSED(err);
}
void rebind_stack() { if (m_stack) { // just reset the context stack pointer to its initial value at the stack start increment_stack_recycle_count(); int error = HPX_COROUTINE_MAKE_CONTEXT( &m_ctx, m_stack, m_stack_size, (void (*)(void*))(funp_), &cb_, NULL); HPX_UNUSED(error); HPX_ASSERT(error == 0); } }
// Invokes the wrapped function f_ with the given argument and returns its
// result. reset() is called on normal completion; on_exit presumably also
// restores state when the scope is left -- confirm reset_on_exit semantics.
BOOST_FORCEINLINE result_type operator()(arg0_type arg0 = arg0_type())
{
    reset_on_exit on_exit = reset_on_exit(*this);
    HPX_UNUSED(on_exit);

    result_type result = f_(arg0);        // invoke wrapped function

    // we always have to run to completion
    // NOTE(review): magic number -- the comment below claims this mirrors
    // threads::terminated; verify the enum value has not changed.
    HPX_ASSERT(result == 5);        // threads::terminated == 5
    reset();
    return result;
}
// Creates a context that will execute 'cb' through the trampoline when
// first switched to. A stack_size of -1 selects the default stack size.
explicit ucontext_context_impl(Functor& cb, std::ptrdiff_t stack_size)
  : m_stack_size(stack_size == -1 ?
        (std::ptrdiff_t)default_stack_size : stack_size),    // cast matches the sibling constructors, avoids a signed/unsigned mix in the conditional
    m_stack(alloc_stack(m_stack_size))
{
    HPX_ASSERT(m_stack);

    typedef void cb_type(Functor*);
    cb_type* cb_ptr = &trampoline<Functor>;

    // nullptr (not NULL) for consistency with the rest of the file
    int error = HPX_COROUTINE_MAKE_CONTEXT(
        &m_ctx, m_stack, m_stack_size, (void (*)(void*))(cb_ptr), &cb,
        nullptr);
    HPX_UNUSED(error);
    HPX_ASSERT(error == 0);
}
// Creates a context bound to 'cb' (stored in cb_) that will run via the
// trampoline when first switched to. A stack_size of -1 selects the
// default stack size.
explicit ucontext_context_impl(Functor& cb, std::ptrdiff_t stack_size)
  : m_stack_size(stack_size == -1 ?
        (std::ptrdiff_t)default_stack_size : stack_size),
    m_stack(alloc_stack(m_stack_size)),
    cb_(cb)
{
    HPX_ASSERT(m_stack);
    funp_ = &trampoline<Functor>;

    // nullptr (not NULL) for consistency with the sibling constructor
    int error = HPX_COROUTINE_MAKE_CONTEXT(
        &m_ctx, m_stack, m_stack_size, (void (*)(void*))(funp_), &cb_,
        nullptr);
    HPX_UNUSED(error);
    HPX_ASSERT(error == 0);
}
// Computes the element count that would remain after splitting at
// division_count (either the first part or the remainder) and asserts
// that it is zero -- this overload must only ever see a zero count.
virtual void copy_some_and_update(
    iterator_type, difference_type division_count, bool first_n)
{
    HPX_ASSERT(this->count_ == 0);

    size_type remaining;
    if (first_n)
    {
        remaining = division_count;
    }
    else
    {
        HPX_ASSERT(difference_type(this->count_) >= division_count);
        remaining = this->count_ - division_count;
    }

    // This function should never be called with a count different
    // from zero.
    HPX_ASSERT(remaining == 0);
    HPX_UNUSED(remaining);
}
// Benchmark helper: runs the old-style hpx::parallel::reduce over 'size'
// identical points; the caller is responsible for timing. The result is
// intentionally discarded.
void measure_transform_reduce_old(std::size_t size)
{
    std::vector<Point> data_representation(size,
        Point{double(std::rand()), double(std::rand())});

    // invoke old reduce
    Point result =
        hpx::parallel::reduce(hpx::parallel::par,
            boost::begin(data_representation),
            boost::end(data_representation),
            Point{0.0, 0.0},
            [](Point res, Point curr)
            {
                return Point{ res.x * res.y + curr.x * curr.y, 1.0};
            }
        );
    HPX_UNUSED(result);
}
// Benchmark helper: runs hpx::parallel::transform_reduce (project each
// point to x*y, then sum) over 'size' identical points; the caller is
// responsible for timing. The result is intentionally discarded.
void measure_transform_reduce(std::size_t size)
{
    std::vector<Point> data_representation(size,
        Point{double(std::rand()), double(std::rand())});

    // invoke transform_reduce
    double result =
        hpx::parallel::transform_reduce(hpx::parallel::par,
            boost::begin(data_representation),
            boost::end(data_representation),
            [](Point r) { return r.x * r.y; },
            0.0,
            std::plus<double>()
        );
    HPX_UNUSED(result);
}
int main() { typedef hpx::lcos::local::shared_mutex shared_mutex_type; int data = 0; shared_mutex_type mtx; { boost::unique_lock<shared_mutex_type> l(mtx); data = 42; } { boost::shared_lock<shared_mutex_type> l(mtx); int i = data; HPX_UNUSED(i); } return 0; }
void barrier::release() { if (node_) { if (hpx::get_runtime_ptr() != nullptr && hpx::threads::threadmanager_is(state_running) && !hpx::is_stopped_or_shutting_down()) { // make sure this runs as an HPX thread if (hpx::threads::get_self_ptr() == nullptr) { hpx::threads::run_as_hpx_thread(&barrier::release, this); } hpx::future<void> f; if ((*node_)->num_ >= (*node_)->cut_off_ || (*node_)->rank_ == 0) { f = hpx::unregister_with_basename( (*node_)->base_name_, (*node_)->rank_); } // we need to wait on everyone to have its name unregistered, // and hold on to our node long enough... boost::intrusive_ptr<wrapping_type> node = node_; hpx::when_all(f, wait(hpx::launch::async)).then( hpx::launch::sync, [HPX_CAPTURE_MOVE(node)](hpx::future<void> f) { HPX_UNUSED(node); f.get(); } ).get(); } intrusive_ptr_release(node_->get()); node_.reset(); } }
// Heuristic deadlock detector: once every HPX_IDLE_LOOP_COUNT_MAX calls,
// scans the thread map 'tm' and logs every thread whose state changed
// since it was last marked.
//
// Returns true only when all state-changed threads found were suspended
// (a potential deadlock); false otherwise, or when detection is disabled
// at compile time or at runtime.
bool dump_suspended_threads(std::size_t num_thread, Map& tm,
    boost::int64_t& idle_loop_count, bool running)
{
#ifndef HPX_THREAD_MINIMAL_DEADLOCK_DETECTION
    // detection compiled out: silence unused-parameter diagnostics
    HPX_UNUSED(tm);
    HPX_UNUSED(idle_loop_count);
    HPX_UNUSED(running); //-V601
    return false;
#else
    if (!minimal_deadlock_detection)
        return false;

    // run the (expensive) scan only after enough idle iterations
    if (HPX_LIKELY(idle_loop_count++ < HPX_IDLE_LOOP_COUNT_MAX))
        return false;

    // reset idle loop count
    idle_loop_count = 0;

    bool result = false;
    bool collect_suspended = true;

    bool logged_headline = false;
    typename Map::const_iterator end = tm.end();
    for (typename Map::const_iterator it = tm.begin(); it != end; ++it)
    {
        threads::thread_data const* thrd = (*it).get();
        threads::thread_state state = thrd->get_state();
        threads::thread_state marked_state = thrd->get_marked_state();

        if (state != marked_state)
        {
            // log each thread only once
            if (!logged_headline)
            {
                if (running)
                {
                    LTM_(error) //-V128
                        << "Listing suspended threads while queue ("
                        << num_thread << ") is empty:";
                }
                else
                {
                    LHPX_CONSOLE_(hpx::util::logging::level::error) //-V128
                        << " [TM] Listing suspended threads while queue ("
                        << num_thread << ") is empty:\n";
                }
                logged_headline = true;
            }

            if (running)
            {
                LTM_(error) << "queue(" << num_thread << "): " //-V128
                    << get_thread_state_name(state) << "("
                    << std::hex << std::setw(8) << std::setfill('0')
                    << (*it).get() << "."
                    << std::hex << std::setw(2) << std::setfill('0')
                    << thrd->get_thread_phase() << "/"
                    << std::hex << std::setw(8) << std::setfill('0')
                    << thrd->get_component_id() << ")"
#ifdef HPX_HAVE_THREAD_PARENT_REFERENCE
                    << " P" << std::hex << std::setw(8) << std::setfill('0')
                    << thrd->get_parent_thread_id()
#endif
                    << ": " << thrd->get_description()
                    << ": " << thrd->get_lco_description();
            }
            else
            {
                LHPX_CONSOLE_(hpx::util::logging::level::error) << " [TM] " //-V128
                    << "queue(" << num_thread << "): "
                    << get_thread_state_name(state) << "("
                    << std::hex << std::setw(8) << std::setfill('0')
                    << (*it).get() << "."
                    << std::hex << std::setw(2) << std::setfill('0')
                    << thrd->get_thread_phase() << "/"
                    << std::hex << std::setw(8) << std::setfill('0')
                    << thrd->get_component_id() << ")"
#ifdef HPX_HAVE_THREAD_PARENT_REFERENCE
                    << " P" << std::hex << std::setw(8) << std::setfill('0')
                    << thrd->get_parent_thread_id()
#endif
                    << ": " << thrd->get_description()
                    << ": " << thrd->get_lco_description() << "\n";
            }

            // remember the state we just reported so this thread is
            // logged only when it changes again
            thrd->set_marked_state(state);

            // result should be true if we found only suspended threads
            if (collect_suspended)
            {
                switch(state.get_state())
                {
                case threads::suspended:
                    result = true;    // at least one is suspended
                    break;
                case threads::pending:
                case threads::active:
                    result = false;    // one is active, no deadlock (yet)
                    collect_suspended = false;
                    break;
                default:
                    // If the thread is terminated we don't care too much
                    // anymore.
                    break;
                }
            }
        }
    }
    return result;
#endif
}
~prepare_main_thread()
{
    // Undo the fiber conversion performed at construction so thread
    // destruction does not leak fiber resources.
    BOOL const converted = ConvertFiberToThread();
    HPX_ASSERT(converted != FALSE);
    HPX_UNUSED(converted);
}
// Converts the calling (native) thread into a fiber so fiber-based
// context switching can be performed from it; the matching destructor
// converts it back to avoid leaking resources on thread destruction.
prepare_main_thread()
{
    // nullptr (not 0) for consistency with the modernized variant
    LPVOID result = ConvertThreadToFiber(nullptr);
    HPX_ASSERT(nullptr != result);
    HPX_UNUSED(result);
}
prepare_main_thread() noexcept { LPVOID result = ConvertThreadToFiber(nullptr); HPX_ASSERT(nullptr != result); HPX_UNUSED(result); }