void apply(BOOST_SCOPED_ENUM(launch) /*policy*/, naming::address& addr,
        naming::id_type const& gid, HPX_ENUM_FWD_ARGS(N, Arg, arg))
    {
        util::block_profiler_wrapper<profiler_tag> bp(apply_logger_);

        if (addr.locality_ == hpx::get_locality()) {
            // local, direct execution
            HPX_ASSERT(components::types_are_compatible(addr.type_,
                components::get_component_type<
                    typename action_type::component_type>()));

            (*this->impl_)->set_data(
                std::move(action_type::execute_function(addr.address_,
                    util::forward_as_tuple(HPX_ENUM_FORWARD_ARGS(N, Arg, arg))))
            );
        }
        else {
            // remote execution
            hpx::applier::detail::apply_c_cb<action_type>(
                addr, this->get_gid(), gid,
                HPX_STD_BIND(&packaged_action::parcel_write_handler, this->impl_,
                    HPX_STD_PLACEHOLDERS::_1),
                HPX_ENUM_FORWARD_ARGS(N, Arg, arg));
        }
    }
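The apply() overload above branches on whether the target address is local: a local target runs the action's execute_function directly and stores the result, while a remote target goes through apply_c_cb and the parcel layer. Below is a minimal self-contained sketch of that local/remote split; is_local and send_parcel are hypothetical stand-ins for HPX's locality check and parcel transport, not real HPX calls.

#include <functional>
#include <iostream>
#include <utility>

// Hypothetical stand-ins for the locality check and the parcel layer.
bool is_local(int locality) { return locality == 0; }
void send_parcel(int locality, std::function<void()> f)
{
    std::cout << "parcel -> locality " << locality << "\n";
}

// Local targets run in place (no serialization); remote targets go
// through the messaging layer, mirroring the branch in apply() above.
void dispatch(int locality, std::function<void()> f)
{
    if (is_local(locality))
        f();
    else
        send_parcel(locality, std::move(f));
}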
Example #2
File: osu_bibw.cpp Project: eile/hpx
void run_benchmark(boost::program_options::variables_map & vm)
{
    // use the first remote locality to bounce messages, if possible
    hpx::id_type here = hpx::find_here();

    hpx::id_type there = here;
    std::vector<hpx::id_type> localities = hpx::find_remote_localities();
    if (!localities.empty())
        there = localities[0];

    // perform actual measurements
    for (std::size_t size = 1; size <= MAX_MSG_SIZE; size *= 2)
    {
        std::size_t loop = LOOP_SMALL;
        std::size_t skip = SKIP_SMALL;
        std::size_t window_size = WINDOW_SIZE_SMALL;

        if (size > LARGE_MESSAGE_SIZE) {
            loop = LOOP_LARGE;
            skip = SKIP_LARGE;
            window_size = WINDOW_SIZE_LARGE;
        }

        hpx::util::high_resolution_timer t;
        for(std::size_t i = 0; i < loop + skip; ++i)
        {
            if(i == skip) t.restart();

            typedef hpx::util::serialize_buffer<char> buffer_type;
            hpx::future<std::vector<hpx::future<buffer_type> > > recv_futures
                = hpx::async(HPX_STD_BIND(&recv_async, there, size, window_size));

            hpx::future<std::vector<hpx::future<void> > > send_futures
                = hpx::async(HPX_STD_BIND(&send_async, there, size, window_size));

            hpx::wait_all(recv_futures.get());
            hpx::wait_all(send_futures.get());
        }

        double bw = (size / 1e6 * loop * window_size * 2) / t.elapsed();
        hpx::cout << std::left << std::setw(10) << size << bw << hpx::endl << hpx::flush;
    }
}
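The helpers recv_async and send_async that the benchmark binds are not shown in this snippet. A minimal sketch of what they might look like, assuming hypothetical irecv_action/isend_action component actions on the remote locality (the names and action types here are assumptions, not the actual osu_bibw.cpp code): each helper posts window_size asynchronous operations of the given message size against the remote locality and returns the resulting futures.

// Assumed action types; the real benchmark defines actions that bounce a
// serialize_buffer off the remote locality. irecv_action / isend_action
// below are placeholders, not the actual names.
typedef hpx::util::serialize_buffer<char> buffer_type;

std::vector<hpx::future<buffer_type> >
recv_async(hpx::id_type to, std::size_t size, std::size_t window_size)
{
    std::vector<hpx::future<buffer_type> > futures;
    futures.reserve(window_size);
    for (std::size_t j = 0; j != window_size; ++j)
        futures.push_back(hpx::async<irecv_action>(to, size));  // one per window slot
    return futures;
}

std::vector<hpx::future<void> >
send_async(hpx::id_type to, std::size_t size, std::size_t window_size)
{
    // deep-copy the payload into the buffer so it outlives this scope
    std::vector<char> data(size);
    buffer_type buf(data.data(), size, buffer_type::copy);

    std::vector<hpx::future<void> > futures;
    futures.reserve(window_size);
    for (std::size_t j = 0; j != window_size; ++j)
        futures.push_back(hpx::async<isend_action>(to, buf));   // one per window slot
    return futures;
}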
Example #3
File: main.cpp Project: jotrk/vanDouken
JNIEXPORT void JNICALL Java_com_stellar_hpx_VisualizationRenderer_init(JNIEnv * env, jobject thiz, int texture)
{
    if(!hpiif().flow)
    {
        hpx::android::register_callback<void>(
            "hpiif_start"
          , HPX_STD_BIND(
                hpiif_start
            )
        );
        hpx::android::register_callback<void(float, float, float, float)>(
            "new_force"
          , HPX_STD_BIND(
                new_force
              , HPX_STD_PLACEHOLDERS::_1
              , HPX_STD_PLACEHOLDERS::_2
              , HPX_STD_PLACEHOLDERS::_3
              , HPX_STD_PLACEHOLDERS::_4
            )
        );
        hpx::android::register_callback<void(std::vector<jbyte> const &, int, int)>(
            "new_image"
          , HPX_STD_BIND(
                new_image
              , HPX_STD_PLACEHOLDERS::_1
              , HPX_STD_PLACEHOLDERS::_2
              , HPX_STD_PLACEHOLDERS::_3
            )
        );
        hpx::android::new_action("hpiif_start");
        // busy-wait until the flow object has been created
        while(!hpiif().flow)
        {
            sleep(1);
        }
        hpiif().flow->initializeGL(texture);
    }
}
Example #4
// Make sure all pending requests are handled
void connection_handler::background_work()
{
    if (stopped_)
        return;

    // Atomically set handling_messages_ to true, unless another work item
    // has already started executing before us.
    bool false_ = false;
    if (!handling_messages_.compare_exchange_strong(false_, true))
        return;

    if(!hpx::is_starting() && !use_io_pool_)
    {
        hpx::applier::register_thread_nullary(
            HPX_STD_BIND(&connection_handler::handle_messages, this),
            "ibverbs::connection_handler::handle_messages",
            threads::pending, true, threads::thread_priority_critical);
    }
    else
    {
        boost::asio::io_service& io_service = io_service_pool_.get_io_service(0);
        io_service.post(HPX_STD_BIND(&connection_handler::handle_messages, this));
    }
}
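The compare_exchange_strong guard above is a common single-worker idiom: of all threads that call background_work() concurrently, exactly one flips the flag and schedules the handler, and the rest return immediately. The same idiom in a self-contained form (plain std::atomic, outside HPX):

#include <atomic>

std::atomic<bool> handling_messages(false);

void background_work_once()
{
    // Exactly one concurrent caller wins the exchange and does the work;
    // everyone else observes 'true' and returns immediately.
    bool expected = false;
    if (!handling_messages.compare_exchange_strong(expected, true))
        return;

    // ... drain pending messages ...

    handling_messages.store(false);   // re-arm for the next round
}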
Example #5
bool connection_handler::do_run()
{
    // initialize network
    std::size_t tried = 0;
    exception_list errors;
    naming::locality::iterator_type end = accept_end(here_);
    for (naming::locality::iterator_type it =
                accept_begin(here_, io_service_pool_.get_io_service(0), true);
            it != end; ++it, ++tried)
    {
        try {
            boost::asio::ip::tcp::endpoint ep = *it;

            acceptor_.bind(ep, boost::system::throws);
            ++tried;    // count the successful attempt; 'break' skips the loop increment
            break;
        }
        catch (boost::system::system_error const& e) {
            errors.add(e);   // store all errors
            continue;
        }
    }

    if (errors.get_error_count() == tried) {
        // all attempts failed
        HPX_THROW_EXCEPTION(network_error,
                            "ibverbs::connection_handler::run", errors.get_message());
        return false;
    }
    time_send = 0;
    time_recv = 0;
    time_acct = 0;

    handling_accepts_ = true;
    boost::asio::io_service& io_service = io_service_pool_.get_io_service(1);
    io_service.post(HPX_STD_BIND(&connection_handler::handle_accepts, this));

    background_work();
    return true;
}
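do_run() walks the available endpoints, binds the first one that works, and aggregates every failure; the collected errors are thrown only if all attempts fail. The same try-each-endpoint pattern reduced to plain Boost.Asio (a sketch independent of HPX; the host/port values are illustrative):

#include <boost/asio.hpp>
#include <iostream>

int main()
{
    using boost::asio::ip::tcp;
    boost::asio::io_service io;
    tcp::resolver resolver(io);
    tcp::acceptor acceptor(io);

    boost::system::error_code last_error;
    bool bound = false;
    tcp::resolver::iterator end;
    for (tcp::resolver::iterator it =
            resolver.resolve(tcp::resolver::query("localhost", "0"));
         it != end && !bound; ++it)
    {
        boost::system::error_code ec;
        acceptor.open(it->endpoint().protocol(), ec);
        if (!ec) acceptor.bind(it->endpoint(), ec);   // keep the first success
        if (!ec) { bound = true; }
        else     { last_error = ec; acceptor.close(ec); }
    }

    if (!bound)
        std::cerr << "all bind attempts failed: "
                  << last_error.message() << "\n";
    return bound ? 0 : 1;
}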
Example #6
    void parcelport::send_pending_parcels_trampoline(
        naming::locality const& locality_id)
    {
        parcelport_connection_ptr client_connection(
            connection_cache_.get(locality_id));

        // If another thread was faster ... try again
        if (!client_connection)
            return;

        std::vector<parcel> parcels;
        std::vector<write_handler_type> handlers;

        typedef pending_parcels_map::iterator iterator;
        
        util::spinlock::scoped_lock l(mtx_);
        iterator it = pending_parcels_.find(locality_id);

        if (it != pending_parcels_.end())
        {
            std::swap(parcels, it->second.first);
            std::swap(handlers, it->second.second);
        }

        if (!parcels.empty() && !handlers.empty())
        {
            // create a new thread which sends parcels that might still be pending
            hpx::applier::register_thread_nullary(
                HPX_STD_BIND(&parcelport::send_pending_parcels, this,
                    client_connection, boost::move(parcels),
                    boost::move(handlers)), "send_pending_parcels");
        }
        else
        {
            BOOST_ASSERT(locality_id == client_connection->destination());
            connection_cache_.reclaim(locality_id, client_connection);
        }
    }
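Note the idiom in the middle of the trampoline: the pending parcels and handlers are std::swap'ed into local vectors under the spinlock, so the actual send never has to touch the shared map. A generic sketch of that take-the-batch-under-the-lock pattern, using a plain std::mutex instead of HPX's spinlock:

#include <mutex>
#include <utility>
#include <vector>

std::mutex mtx;
std::vector<int> pending;           // filled by producers under mtx

void drain()
{
    std::vector<int> batch;
    {
        std::lock_guard<std::mutex> l(mtx);
        std::swap(batch, pending);  // O(1): take everything, leave 'pending' empty
    }
    // process the batch outside the critical section so producers
    // queueing new work are never blocked on the processing time
    for (std::size_t i = 0; i != batch.size(); ++i) { /* send batch[i] */ }
}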
Example #7
    void parcelport::send_pending_parcels_trampoline(
        boost::system::error_code const& ec,
        naming::locality const& locality_id,
        parcelport_connection_ptr client_connection)
    {
        {
            lcos::local::spinlock::scoped_lock l(mtx_);

            // Give this connection back to the cache as it's no longer needed.
            BOOST_ASSERT(locality_id == client_connection->destination());
            connection_cache_.reclaim(locality_id, client_connection);

            pending_parcels_map::iterator it = pending_parcels_.find(locality_id);
            if (it == pending_parcels_.end() || it->second.first.empty())
                return;
        }

        // Create a new HPX thread which sends parcels that are still pending.
        hpx::applier::register_thread_nullary(
            HPX_STD_BIND(&parcelport::retry_sending_parcels,
                this->shared_from_this(), locality_id), "retry_sending_parcels",
            threads::pending, true, threads::thread_priority_critical);
    }
Example #8
        return id(native_handle());
    }

    unsigned thread::hardware_concurrency() BOOST_NOEXCEPT
    {
#if defined(__ANDROID__) || defined(ANDROID)
        return ::android_getCpuCount();
#else
        return boost::thread::hardware_concurrency();
#endif
    }

    void thread::start_thread(BOOST_RV_REF(HPX_STD_FUNCTION<void()>) func)
    {
        threads::thread_init_data data(
            HPX_STD_BIND(&thread::thread_function_nullary, boost::move(func)),
            "thread::thread_function_nullary");

        error_code ec(lightweight);
        threads::thread_id_type ident = hpx::get_runtime().get_thread_manager().
            register_thread(data, threads::suspended, true, ec);
        if (ec) {
            HPX_THROW_EXCEPTION(thread_resource_error, "thread::start_thread",
                "Could not create thread");
            return;
        }

        // inform ourselves if the thread function exits
        threads::add_thread_exit_callback(ident, util::bind(&thread::detach, this));

        // now start the thread
Example #9
// Copyright (c) 2012-2013 Thomas Heller
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

// This file has been automatically generated using the Boost.Wave tool.
// Do not edit manually.


template <typename Arg0>
void apply(BOOST_SCOPED_ENUM(launch) policy, naming::id_type const& gid,
           BOOST_FWD_REF(Arg0) arg0)
{
    util::block_profiler_wrapper<profiler_tag> bp(apply_logger_);
    hpx::apply_c_cb<action_type>(this->get_gid(), gid,
                                 HPX_STD_BIND(&packaged_action::parcel_write_handler, this->impl_,
                                         HPX_STD_PLACEHOLDERS::_1),
                                 boost::forward<Arg0>( arg0 ));
}
template <typename Arg0>
void apply(BOOST_SCOPED_ENUM(launch) policy, naming::address& addr,
           naming::id_type const& gid, BOOST_FWD_REF(Arg0) arg0)
{
    util::block_profiler_wrapper<profiler_tag> bp(apply_logger_);
    hpx::apply_c_cb<action_type>(this->get_gid(), addr, gid,
                                 HPX_STD_BIND(&packaged_action::parcel_write_handler, this->impl_,
                                         HPX_STD_PLACEHOLDERS::_1),
                                 boost::forward<Arg0>( arg0 ));
}
template <typename Arg0>
void apply_p(BOOST_SCOPED_ENUM(launch) policy, naming::id_type const& gid,
             threads::thread_priority priority, BOOST_FWD_REF(Arg0) arg0)
Example #10
void decode_parcels(Parcelport & parcelport, Connection & connection,
                    boost::shared_ptr<Buffer> buffer)
{
    typedef typename Buffer::transmission_chunk_type transmission_chunk_type;

    // add parcel data to incoming parcel queue
    std::size_t num_zero_copy_chunks =
        static_cast<std::size_t>(
            static_cast<boost::uint32_t>(buffer->num_chunks_.first));

    boost::shared_ptr<std::vector<util::serialization_chunk> > chunks;
    if (num_zero_copy_chunks != 0) {
        // decode chunk information
        chunks = boost::make_shared<std::vector<util::serialization_chunk> >();

        std::size_t num_non_zero_copy_chunks =
            static_cast<std::size_t>(
                static_cast<boost::uint32_t>(buffer->num_chunks_.second));

        chunks->resize(num_zero_copy_chunks + num_non_zero_copy_chunks);

        // place the zero-copy chunks at their spots first
        for (std::size_t i = 0; i != num_zero_copy_chunks; ++i)
        {
            transmission_chunk_type& c = buffer->transmission_chunks_[i];
            boost::uint64_t first = c.first, second = c.second;

            HPX_ASSERT(buffer->chunks_[i].size() == second);

            (*chunks)[first] = util::create_pointer_chunk(
                                   buffer->chunks_[i].data(), second);
        }

        std::size_t index = 0;
        for (std::size_t i = num_zero_copy_chunks;
                i != num_zero_copy_chunks + num_non_zero_copy_chunks;
                ++i)
        {
            transmission_chunk_type& c = buffer->transmission_chunks_[i];
            boost::uint64_t first = c.first, second = c.second;

            // find next free entry
            while ((*chunks)[index].size_ != 0)
                ++index;

            // place the index based chunk at the right spot
            (*chunks)[index] = util::create_index_chunk(first, second);
            ++index;
        }
        HPX_ASSERT(index == num_zero_copy_chunks + num_non_zero_copy_chunks);
    }
    bool first_message = false;
#if defined(HPX_HAVE_SECURITY)
    if(connection.first_message_)
    {
        connection.first_message_ = false;
        first_message = true;
    }
#endif
    HPX_ASSERT(!buffer->parcels_decoded_);
    if(hpx::is_running() && parcelport.async_serialization())
    {
        hpx::applier::register_thread_nullary(
            HPX_STD_BIND(
                &decode_parcels_impl<Parcelport, Buffer>,
                boost::ref(parcelport), buffer, chunks, first_message),
            "decode_parcels",
            threads::pending, true, threads::thread_priority_critical);
    }
    else
    {
        decode_parcels_impl(parcelport, buffer, chunks, first_message);
    }
}
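The placement logic above can be seen in miniature with toy data: zero-copy entries carry their destination index in 'first' and are placed first; index chunks then fill the remaining zero-sized slots in order. A self-contained sketch with a simplified chunk type (not the real util::serialization_chunk) and made-up sizes/offsets:

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// Simplified stand-in for util::serialization_chunk; size_ == 0 marks a free slot.
struct chunk
{
    std::size_t size_;
    bool zero_copy_;
    chunk(std::size_t size = 0, bool zero_copy = false)
      : size_(size), zero_copy_(zero_copy) {}
};

int main()
{
    // (first, second) pairs as in buffer->transmission_chunks_: zero-copy
    // entries are (destination index, size), index entries are
    // (offset into the main buffer, size).
    std::vector<std::pair<std::size_t, std::size_t> > tc;
    tc.push_back(std::make_pair(1, 64));    // zero-copy -> slot 1
    tc.push_back(std::make_pair(3, 128));   // zero-copy -> slot 3
    tc.push_back(std::make_pair(0, 16));    // index chunk -> first free slot (0)
    tc.push_back(std::make_pair(100, 32));  // index chunk -> next free slot (2)
    std::size_t num_zero_copy = 2;

    std::vector<chunk> chunks(tc.size());

    // zero-copy chunks are placed at their recorded slots first
    for (std::size_t i = 0; i != num_zero_copy; ++i)
        chunks[tc[i].first] = chunk(tc[i].second, true);

    // index chunks fill the remaining free slots in order
    std::size_t index = 0;
    for (std::size_t i = num_zero_copy; i != tc.size(); ++i)
    {
        while (chunks[index].size_ != 0)
            ++index;
        chunks[index++] = chunk(tc[i].second, false);
    }

    // prints: 0: index(16), 1: pointer(64), 2: index(32), 3: pointer(128)
    for (std::size_t i = 0; i != chunks.size(); ++i)
        std::cout << i << ": " << (chunks[i].zero_copy_ ? "pointer" : "index")
                  << "(" << chunks[i].size_ << ")\n";
    return 0;
}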