Example #1
    void this_thread_executor<Scheduler>::suspend_back_into_calling_context()
    {
        // Give the invoking context a chance to catch up with its tasks, but
        // only if this scheduler is currently active. For this executor the
        // active state is state_stopping rather than state_running, as it has
        // to exit as early as possible.
        boost::atomic<hpx::state>& state = scheduler_.get_state(0);
        hpx::state expected = state_stopping;
        if (state.compare_exchange_strong(expected, state_suspended))
        {
            {
                threads::detail::reset_tss_helper reset_on_exit(
                    parent_thread_num_);
                on_self_reset on_exit(self_);

                this_thread::suspend();
            }

            // reset the state back to stopping if it is still suspended
            expected = state_suspended;
            state.compare_exchange_strong(expected, state_stopping);
        }
        else
        {
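            // the exchange failed, so 'expected' now holds the actual state,
            // which must not be suspended at this point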
            HPX_ASSERT(expected != state_suspended);
        }
    }
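
A minimal, self-contained sketch of the compare-and-swap state transition used above, with std::atomic standing in for the scheduler's hpx::state; the state enum, the global g_state, and suspend_sketch() are invented for illustration and are not HPX API.

    #include <atomic>
    #include <cassert>
    #include <iostream>

    enum state { state_starting, state_stopping, state_suspended, state_terminating };

    std::atomic<state> g_state(state_stopping);

    void suspend_sketch()
    {
        // Transition stopping -> suspended, but only if nobody changed the
        // state underneath us.
        state expected = state_stopping;
        if (g_state.compare_exchange_strong(expected, state_suspended))
        {
            // ... yield control back to the calling context here
            //     (this_thread::suspend() in the real code) ...

            // Transition back suspended -> stopping, unless someone else
            // (e.g. the destructor) moved the state on in the meantime.
            expected = state_suspended;
            g_state.compare_exchange_strong(expected, state_stopping);
        }
        else
        {
            // The exchange failed, so 'expected' holds the actual state,
            // which must not be suspended.
            assert(expected != state_suspended);
        }
    }

    int main()
    {
        suspend_sketch();
        std::cout << "final state: " << g_state.load() << "\n";   // prints 1 (state_stopping)
    }
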
Example #2
        BOOST_FORCEINLINE result_type operator()(arg0_type arg0 = arg0_type())
        {
            reset_on_exit on_exit = reset_on_exit(*this);
            HPX_UNUSED(on_exit);

            result_type result = f_(arg0);   // invoke wrapped function

            // we always have to run to completion
            HPX_ASSERT(result == threads::terminated);
            reset();
            return result;
        }
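
The reset_on_exit object above is a scope guard: its constructor marks the wrapped call as in flight and its destructor undoes that when the scope is left, even if f_ throws. Below is a minimal sketch of the same RAII idiom; wrapped_call and its 'active' flag are made up for illustration and do not reflect the real reset_on_exit type.

    #include <cassert>
    #include <iostream>

    struct wrapped_call
    {
        bool active = false;

        // RAII guard: sets the flag on construction and clears it again when
        // the enclosing scope is left, even on an exception.
        struct reset_on_exit
        {
            explicit reset_on_exit(wrapped_call& owner) : owner_(owner)
            {
                owner_.active = true;
            }
            ~reset_on_exit()
            {
                owner_.active = false;
            }
            wrapped_call& owner_;
        };

        int operator()(int arg)
        {
            reset_on_exit on_exit(*this);
            (void)on_exit;                 // plays the role of HPX_UNUSED

            int result = arg * 2;          // stand-in for f_(arg0)
            assert(active);                // still marked active at this point
            return result;
        }
    };

    int main()
    {
        wrapped_call call;
        std::cout << call(21) << " active=" << call.active << "\n";   // 42 active=0
    }
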
Example #3
    void this_thread_executor<Scheduler>::run()
    {
        // We want to exit this scheduling loop as early as possible, thus
        // we use 'state_stopping' instead of 'state_running'.

        // Set the state to 'state_stopping' only if it's still in
        // 'state_starting' state, otherwise our destructor is currently being
        // executed, which means we still need to execute all threads.
        boost::atomic<hpx::state>& state = scheduler_.get_state(0);
        hpx::state expected = state_starting;
        if (state.compare_exchange_strong(expected, state_stopping))
        {
            {
                typename mutex_type::scoped_lock l(mtx_);
                scheduler_.add_punit(0, thread_num_);
                scheduler_.on_start_thread(0);
            }

            self_ = threads::get_self_ptr();

            this_thread_on_run_exit on_exit(shutdown_sem_, self_);

            // manage the thread num
            HPX_ASSERT(orig_thread_num_ != std::size_t(-1));

            threads::detail::reset_tss_helper reset_on_exit(orig_thread_num_);
            parent_thread_num_ = reset_on_exit.previous_thread_num();

            // FIXME: turn these values into performance counters
            boost::int64_t executed_threads = 0, executed_thread_phases = 0;
            boost::uint64_t overall_times = 0, thread_times = 0;

            threads::detail::scheduling_counters counters(
                executed_threads, executed_thread_phases,
                overall_times, thread_times);


            threads::detail::scheduling_callbacks callbacks(
                threads::detail::scheduling_callbacks::callback_type(),
                util::bind( //-V107
                    &this_thread_executor::suspend_back_into_calling_context,
                    this));

            threads::detail::scheduling_loop(0, scheduler_, counters, callbacks);

            // the scheduling_loop is allowed to exit only if no more HPX
            // threads exist
            HPX_ASSERT(!scheduler_.get_thread_count(
                unknown, thread_priority_default, 0) ||
                state == state_terminating);
        }
    }
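
run() hands its own suspend_back_into_calling_context() to the scheduling loop through util::bind, so the loop can yield back to the calling context when it runs out of work. The following sketch shows that callback wiring with std::function and std::bind; executor_sketch and scheduling_loop_sketch are invented stand-ins, not the HPX types.

    #include <functional>
    #include <iostream>

    struct executor_sketch
    {
        int suspend_count = 0;

        void suspend_back_into_calling_context()
        {
            // In the real code this flips the scheduler state and suspends
            // the current HPX thread; here we only count the calls.
            ++suspend_count;
        }
    };

    // Stand-in for threads::detail::scheduling_loop: runs some work and
    // invokes the 'outer' callback once there is nothing left to do.
    void scheduling_loop_sketch(int work_items, std::function<void()> const& on_outer)
    {
        for (int i = 0; i != work_items; ++i)
        {
            // ... execute one pending HPX thread ...
        }
        if (on_outer)
            on_outer();    // queue drained: give the calling context a chance
    }

    int main()
    {
        executor_sketch exec;

        // Equivalent of the util::bind expression in run(): bind the member
        // function to this executor instance so it can be stored as a callback.
        std::function<void()> outer = std::bind(
            &executor_sketch::suspend_back_into_calling_context, &exec);

        scheduling_loop_sketch(3, outer);
        std::cout << "suspended " << exec.suspend_count << " time(s)\n";
    }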