Пример #1
0
 ///////////////////////////////////////////////////////////////////////
 // Expand the main counter name: generate one counter instance per OS
 // worker thread and hand each resolved name to the discovery callback.
 bool expand_counter_info_threads(
     counter_info& i, counter_path_elements& p,
     discover_counter_func const& f, error_code& ec)
 {
     std::size_t const num_threads = get_os_thread_count();
     for (std::size_t thread_num = 0; thread_num != num_threads; ++thread_num)
     {
         // select the instance corresponding to this worker thread
         p.instanceindex_ = static_cast<boost::int64_t>(thread_num);

         // resolve the full counter name; stop on the first failure,
         // be it an invalid status, a callback veto, or an error code
         counter_status const status = get_counter_name(p, i.fullname_, ec);
         if (!status_is_valid(status))
             return false;
         if (!f(i, ec))
             return false;
         if (ec)
             return false;
     }
     return true;
 }
Пример #2
0
// store all information required for static allocation in
// proxies_static_allocation_data
// also store new proxy scheduler
void resource_manager::preprocess_static_allocation(
    std::size_t min_punits, std::size_t max_punits)
{
    proxies_map_type::iterator it;
    proxies_static_allocation_data.clear();

    for (proxies_map_type::iterator it = proxies_.begin();
            it != proxies_.end(); ++it)
    {
        proxy_data& p = (*it).second;
        static_allocation_data st;
        st.proxy_ = p.proxy_;

        // ask executor for its policies
        error_code ec1(lightweight);
        st.min_proxy_cores_ =
            p.proxy_->get_policy_element(detail::min_concurrency, ec1);
        if (ec1) st.min_proxy_cores_ = 1;
        st.max_proxy_cores_ =
            p.proxy_->get_policy_element(detail::max_concurrency, ec1);
        if (ec1) st.max_proxy_cores_ = get_os_thread_count();

        st.num_borrowed_cores_ = 0;
        st.num_owned_cores_ = 0;

        for (coreids_type coreids : p.core_ids_)
        {
            if (punits_[coreids.first].use_count_ > 1)
                st.num_borrowed_cores_++;
            if (punits_[coreids.first].use_count_ == 1)
                st.num_owned_cores_++;
        }

        proxies_static_allocation_data.insert(
            allocation_data_map_type::value_type((*it).first , st));
    }

    std::size_t cookie = next_cookie_ + 1;

    static_allocation_data st;
    st.min_proxy_cores_ = min_punits;
    st.max_proxy_cores_ = max_punits;
    st.adjusted_desired_ = static_cast<double>(max_punits);
    st.num_cores_stolen_ = 0;
    proxies_static_allocation_data.insert(
        allocation_data_map_type::value_type(cookie , st));
}
Пример #3
0
// Request an initial resource allocation for the given executor proxy.
// Returns the cookie under which the proxy was registered, or
// std::size_t(-1) on failure.
std::size_t resource_manager::initial_allocation(
    detail::manage_executor* proxy, error_code& ec)
{
    // a null executor proxy is a usage error
    if (proxy == nullptr)
    {
        HPX_THROWS_IF(ec, bad_parameter,
                      "resource_manager::init_allocation",
                      "manage_executor pointer is a nullptr");
        return std::size_t(-1);
    }

    // query the executor's concurrency policies, substituting defaults
    // whenever the executor does not provide a value
    error_code policy_ec(lightweight);
    std::size_t min_cores =
        proxy->get_policy_element(detail::min_concurrency, policy_ec);
    if (policy_ec)
        min_cores = 1;

    std::size_t max_cores =
        proxy->get_policy_element(detail::max_concurrency, policy_ec);
    if (policy_ec)
        max_cores = get_os_thread_count();

    // everything below touches shared state -- hold the lock from here on
    std::lock_guard<mutex_type> l(mtx_);

    // allocate the initial set of virtual cores for the given executor
    std::vector<std::pair<std::size_t, std::size_t> > cores =
        allocate_virt_cores(proxy, min_cores, max_cores, ec);
    if (ec)
        return std::size_t(-1);

    // register the proxy with this resource manager under a new cookie
    std::size_t cookie = ++next_cookie_;
    proxies_.insert(proxies_map_type::value_type(
        cookie, proxy_data(proxy, std::move(cores))));

    if (&ec != &throws)
        ec = make_success_code();
    return cookie;
}
Пример #4
0
 // Executor running a static_priority_queue scheduler on dedicated OS
 // threads; uses one worker per available OS thread as reported by
 // get_os_thread_count().
 static_priority_queue_os_executor::static_priority_queue_os_executor()
   : scheduled_executor(new detail::thread_pool_os_executor<
         policies::static_priority_queue_scheduler<> >(get_os_thread_count()))
 {}
Пример #5
0
 // Executor running a local_queue scheduler on dedicated OS threads;
 // uses one worker per available OS thread as reported by
 // get_os_thread_count().
 local_queue_os_executor::local_queue_os_executor()
   : scheduled_executor(new detail::thread_pool_os_executor<
         policies::local_queue_scheduler<> >(get_os_thread_count()))
 {}
Пример #6
0
 // Executor based on a static_priority_queue scheduler, sized to the
 // number of available OS threads with a minimum of one worker.
 //
 // Fix: pass a description string to thread_pool_executor, consistent
 // with the sibling executors (local_priority_queue_executor,
 // static_queue_executor) which all name their pool.
 static_priority_queue_executor::static_priority_queue_executor()
   : scheduled_executor(new detail::thread_pool_executor<
         policies::static_priority_queue_scheduler<lcos::local::spinlock> >(
             get_os_thread_count(), 1, "static_priority_queue_executor"))
 {}
Пример #7
0
 // Executor based on a local_priority_queue scheduler, sized to the
 // number of available OS threads with a minimum of one worker; the
 // string names the underlying thread pool.
 local_priority_queue_executor::local_priority_queue_executor()
   : scheduled_executor(new detail::thread_pool_executor<
         policies::local_priority_queue_scheduler<> >(
             get_os_thread_count(), 1, "local_priority_queue_executor"))
 {}
Пример #8
0
 // Executor based on a throttle_queue scheduler, sized to the number of
 // available OS threads with a minimum of one worker.
 //
 // Fix: pass a description string to thread_pool_executor, consistent
 // with the sibling executors (local_priority_queue_executor,
 // static_queue_executor) which all name their pool.
 throttle_queue_executor::throttle_queue_executor()
   : scheduled_executor(new detail::thread_pool_executor<
         policies::throttle_queue_scheduler<> >(
             get_os_thread_count(), 1, "throttle_queue_executor"))
 {}
Пример #9
0
 // Executor based on a static_queue scheduler, sized to the number of
 // available OS threads with a minimum of one worker; the string names
 // the underlying thread pool.
 static_queue_executor::static_queue_executor()
   : scheduled_executor(new detail::thread_pool_executor<
         policies::static_queue_scheduler<> >(
             get_os_thread_count(), 1, "static_queue_executor"))
 {}
Пример #10
0
// Allocate up to max_punits (at least min_punits) processing units for the
// given executor proxy, returning pairs of (physical core, virtual core).
// The resource manager is locked while executing this function.
//
// Fixes in the error-rollback path:
//  * use counts were incremented at the *physical* index (punits_[i]) but
//    decremented at the *virtual* index (punits_[j]); the rollback now
//    decrements punits_[core_ids[j].first], the matching physical unit.
//  * remove_processing_unit() was called with the caller's 'ec', which
//    could reset/overwrite the very error being reported; cleanup now
//    uses its own lightweight error_code.
std::vector<std::pair<std::size_t, std::size_t> >
resource_manager::allocate_virt_cores(
    detail::manage_executor* proxy, std::size_t min_punits,
    std::size_t max_punits, error_code& ec)
{
    std::vector<coreids_type> core_ids;

    // array of available processing units
    std::vector<punit_status> available_punits(
        get_os_thread_count(), punit_status::unassigned);

    // find all available processing units with zero use count
    std::size_t reserved = reserve_processing_units(0, max_punits,
                           available_punits);
    if (reserved < max_punits)
    {
        // insufficient available cores found, try to share
        // processing units
        preprocess_static_allocation(min_punits, max_punits);

        reserved += release_cores_on_existing_schedulers(
                        release_borrowed_cores, available_punits);

        if(reserved < max_punits)
        {
            reserved += redistribute_cores_among_all(
                            reserved, min_punits, max_punits, available_punits);

            if (reserved < min_punits)
            {
                reserved += release_cores_on_existing_schedulers(
                                release_cores_to_min, available_punits);
                if (reserved < min_punits)
                {
                    // last resort: share cores which are already in use
                    reserve_at_higher_use_count(
                        min_punits - reserved , available_punits);
                }
            }
        }
    }

    // processing units found, inform scheduler
    std::size_t punit = 0;
    for (std::size_t i = 0; i != available_punits.size(); ++i)
    {
        if (available_punits[i] == punit_status::reserved) //-V104
        {
            proxy->add_processing_unit(punit, i, ec);
            if (ec) break;

            // remember the (physical, virtual) core mapping
            core_ids.push_back(std::make_pair(i, punit));
            ++punit;

            // update use count for reserved processing units
            ++punits_[i].use_count_;
        }
    }
    HPX_ASSERT(punit <= max_punits);

    if (ec) {
        // on error, remove the already assigned virtual cores; use a
        // separate error_code so the original error stays intact
        error_code ec1(lightweight);
        for (std::size_t j = 0; j != punit; ++j)
        {
            proxy->remove_processing_unit(j, ec1);
            // roll back the use count of the matching *physical* unit
            --punits_[core_ids[j].first].use_count_;
        }
        return std::vector<coreids_type>();
    }

    if (&ec != &throws)
        ec = make_success_code();
    return core_ids;
}
Пример #11
0
// Construct the resource manager: cookies start at 0 and are
// pre-incremented on use; one processing-unit slot is created per OS
// thread, and the machine topology is cached.
resource_manager::resource_manager()
    : next_cookie_(0),
      punits_(get_os_thread_count()),
      topology_(get_topology())
{}