/////////////////////////////////////////////////////////////////////////// // Call-back function for parcelHandler to call when new parcels are received void action_manager::fetch_parcel( parcelset::parcelhandler& parcel_handler, naming::gid_type const& parcel_id) { parcelset::parcel p; if (!parcel_handler.get_parcel(p, parcel_id)) return; while (threads::threadmanager_is(starting)) { boost::this_thread::sleep(boost::get_system_time() + boost::posix_time::milliseconds(HPX_NETWORK_RETRIES_SLEEP)); } // Give up if we're shutting down. if (threads::threadmanager_is(stopping)) { LPT_(debug) << "action_manager: fetch_parcel: dropping late " "parcel " << p; return; } // write this parcel to the log LPT_(debug) << "action_manager: fetch_parcel: " << p; appl_.schedule_action(p); }
/// Handle completion of a read operation. void parcelport::handle_read_completion(boost::system::error_code const& e, server::parcelport_connection_ptr) { if (e && e != boost::asio::error::operation_aborted && e != boost::asio::error::eof) { LPT_(error) << "handle read operation completion: error: " << e.message(); } }
// Finalize an encoded buffer: record the final sizes, update the attached
// performance-counter record, and build the chunk-info table needed for
// transmission (all zero-copy chunk infos first, then all non-zero-copy).
void encode_finalize(Buffer & buffer, std::size_t arg_size)
{
    buffer.size_ = buffer.data_.size();
    buffer.data_size_ = arg_size;

    LPT_(debug) << binary_archive_content(buffer);

    performance_counters::parcels::data_point& counters = buffer.data_point_;
    counters.bytes_ = arg_size;
    counters.raw_bytes_ = buffer.data_.size();

    typedef typename Buffer::transmission_chunk_type transmission_chunk_type;
    typedef typename Buffer::count_chunks_type count_chunks_type;

    std::vector<transmission_chunk_type>& out_chunks =
        buffer.transmission_chunks_;
    out_chunks.clear();
    out_chunks.reserve(buffer.chunks_.size());

    // first pass: collect all zero-copy (pointer) chunks, keyed by their
    // position in the chunk list
    std::size_t pos = 0;
    for (serialization::serialization_chunk& chunk : buffer.chunks_)
    {
        if (chunk.type_ == serialization::chunk_type_pointer)
            out_chunks.push_back(transmission_chunk_type(pos, chunk.size_));
        ++pos;
    }

    // record how many chunks are zero-copy vs. non-zero-copy
    std::size_t const num_zero_copy = out_chunks.size();
    buffer.num_chunks_ = count_chunks_type(
        static_cast<boost::uint32_t>(num_zero_copy),
        static_cast<boost::uint32_t>(
            buffer.chunks_.size() - num_zero_copy));

    // second pass: only needed if there is at least one zero-copy chunk;
    // the remaining chunk infos are non-zero-copy (index) chunks
    if (!out_chunks.empty())
    {
        for (serialization::serialization_chunk& chunk : buffer.chunks_)
        {
            if (chunk.type_ == serialization::chunk_type_index)
            {
                out_chunks.push_back(transmission_chunk_type(
                    chunk.data_.index_, chunk.size_));
            }
        }
    }
}
// Handle completion of a read operation. void parcelport::handle_read_completion(boost::system::error_code const& e, server::shmem::parcelport_connection_ptr c) { if (!e) return; if (e != boost::asio::error::operation_aborted && e != boost::asio::error::eof) { LPT_(error) << "handle read operation completion: error: " << e.message(); // remove this connection from the list of known connections lcos::local::spinlock::scoped_lock l(mtx_); accepted_connections_.erase(c); } }
// Handle completion of a read operation. void connection_handler::handle_read_completion( boost::system::error_code const& e, boost::shared_ptr<receiver> receiver_conn) { if (!e) return; if (e != boost::asio::error::operation_aborted && e != boost::asio::error::eof) { LPT_(error) << "handle read operation completion: error: " << e.message(); } // if (e != boost::asio::error::eof) { // remove this connection from the list of known connections lcos::local::spinlock::scoped_lock l(mtx_); accepted_connections_.erase(receiver_conn); } }
// De-serialize a message received through the shmem parcelport and queue
// every contained parcel with the parcel handler.
//
// pp           - parcelport instance the data arrived on; receives the
//                de-serialized parcels and the completed counter record
// parcel_data  - raw serialized parcel data (taken by value)
// receive_data - partially-filled performance-counter record; this function
//                fills in parcel count, raw bytes and serialization time
//
// Exceptions are reported via hpx::report_error; std::exceptions from the
// serialization layer are repackaged as hpx::exception to avoid slicing.
void decode_message(parcelport& pp,
    parcelset::shmem::data_buffer parcel_data,
    performance_counters::parcels::data_point receive_data)
{
    // shmem transport never uses data chunking; array optimizations follow
    // the parcelport configuration
    unsigned archive_flags = boost::archive::no_header;
    if (!pp.allow_array_optimizations())
        archive_flags |= util::disable_array_optimization;
    archive_flags |= util::disable_data_chunking;

    // protect from un-handled exceptions bubbling up
    try {
        try {
            // mark start of serialization
            util::high_resolution_timer timer;
            boost::int64_t overall_add_parcel_time = 0;

            {
                // De-serialize the parcel data
                data_buffer::data_buffer_type const& buffer =
                    parcel_data.get_buffer();

                util::portable_binary_iarchive archive(
                    buffer, buffer.size(), archive_flags);

                // the stream starts with the number of parcels it carries
                std::size_t parcel_count = 0;
                archive >> parcel_count;
                for(std::size_t i = 0; i < parcel_count; ++i)
                {
                    // de-serialize parcel and add it to incoming parcel queue
                    parcel p;
                    archive >> p;

                    // make sure this parcel ended up on the right locality
                    BOOST_ASSERT(p.get_destination_locality() == pp.here());

                    // be sure not to measure add_parcel as serialization time
                    boost::int64_t add_parcel_time =
                        timer.elapsed_nanoseconds();
                    pp.add_received_parcel(p);
                    overall_add_parcel_time +=
                        timer.elapsed_nanoseconds() - add_parcel_time;
                }

                // complete received data with parcel count
                receive_data.num_parcels_ = parcel_count;
                // amount of uncompressed data
                receive_data.raw_bytes_ = archive.bytes_read();
            }

            // store the time required for serialization (total elapsed time
            // minus the time spent queueing the parcels)
            receive_data.serialization_time_ =
                timer.elapsed_nanoseconds() - overall_add_parcel_time;

            pp.add_received_data(receive_data);
        }
        catch (hpx::exception const& e) {
            LPT_(error)
                << "decode_message: caught hpx::exception: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::system::system_error const& e) {
            LPT_(error)
                << "decode_message: caught boost::system::error: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::exception const&) {
            LPT_(error)
                << "decode_message: caught boost::exception.";
            hpx::report_error(boost::current_exception());
        }
        catch (std::exception const& e) {
            // We have to repackage all exceptions thrown by the
            // serialization library as otherwise we will loose the
            // e.what() description of the problem, due to slicing.
            boost::throw_exception(boost::enable_error_info(
                hpx::exception(serialization_error, e.what())));
        }
    }
    catch (...) {
        LPT_(error)
            << "decode_message: caught unknown exception.";
        hpx::report_error(boost::current_exception());
    }
}
// Serialize as many of the given parcels as fit into one message buffer.
//
// ps                - array of parcels, all destined for the same locality
//                     (the destination id is taken from ps[0])
// num_parcels       - number of parcels in ps; std::size_t(-1) selects
//                     single-parcel mode, in which no parcel count is
//                     written into the archive
// buffer            - output buffer; must be empty on entry
// archive_flags_    - base serialization flags
// max_outbound_size - soft limit: parcels are added until the estimated
//                     size reaches this bound
//
// Returns the number of parcels actually encoded, or 0 on error (the error
// is reported via hpx::report_error).
std::size_t encode_parcels(parcel const * ps, std::size_t num_parcels,
    Buffer & buffer, int archive_flags_, boost::uint64_t max_outbound_size)
{
    HPX_ASSERT(buffer.data_.empty());

    // collect argument sizes from parcels
    std::size_t arg_size = 0;
    boost::uint32_t dest_locality_id = ps[0].get_destination_locality_id();

    std::size_t parcels_sent = 0;

    std::size_t parcels_size = 1;
    if(num_parcels != std::size_t(-1))
        parcels_size = num_parcels;

    // guard against serialization errors
    try {
        try {
            // Get the chunk size from the allocator if it supports it
            size_t chunk_default = hpx::traits::default_chunk_size<
                typename Buffer::allocator_type
            >::call(buffer.data_.get_allocator());

            // preallocate data: accumulate estimated parcel sizes until the
            // outbound size limit is reached; the first parcel is always
            // included (arg_size starts at zero)
            for (/**/; parcels_sent != parcels_size; ++parcels_sent)
            {
                if (arg_size >= max_outbound_size)
                    break;
                arg_size += traits::get_type_size(
                    ps[parcels_sent], archive_flags_);
            }

            buffer.data_.reserve((std::max)(chunk_default, arg_size));

            // mark start of serialization
            util::high_resolution_timer timer;

            {
                // Serialize the data; an optional per-parcel filter enables
                // compression of the archive
                std::unique_ptr<serialization::binary_filter> filter(
                    ps[0].get_serialization_filter());

                int archive_flags = archive_flags_;
                if (filter.get() != 0) {
                    filter->set_max_length(buffer.data_.capacity());
                    archive_flags |= serialization::enable_compression;
                }

                serialization::output_archive archive(
                    buffer.data_
                  , archive_flags
                  , dest_locality_id
                  , &buffer.chunks_
                  , filter.get());

                // write the parcel count prefix only in multi-parcel mode
                if(num_parcels != std::size_t(-1))
                    archive << parcels_sent;

                for(std::size_t i = 0; i != parcels_sent; ++i)
                    archive << ps[i];

                // replace the size estimate by the exact encoded size
                arg_size = archive.bytes_written();
            }

            // store the time required for serialization
            buffer.data_point_.serialization_time_ =
                timer.elapsed_nanoseconds();
        }
        catch (hpx::exception const& e) {
            LPT_(fatal)
                << "encode_parcels: "
                   "caught hpx::exception: "
                << e.what();
            hpx::report_error(boost::current_exception());
            return 0;
        }
        catch (boost::system::system_error const& e) {
            LPT_(fatal)
                << "encode_parcels: "
                   "caught boost::system::error: "
                << e.what();
            hpx::report_error(boost::current_exception());
            return 0;
        }
        catch (boost::exception const&) {
            LPT_(fatal)
                << "encode_parcels: "
                   "caught boost::exception";
            hpx::report_error(boost::current_exception());
            return 0;
        }
        catch (std::exception const& e) {
            // We have to repackage all exceptions thrown by the
            // serialization library as otherwise we will loose the
            // e.what() description of the problem, due to slicing.
            boost::throw_exception(boost::enable_error_info(
                hpx::exception(serialization_error, e.what())));
            return 0;
        }
    }
    catch (...) {
        LPT_(fatal)
            << "encode_parcels: "
               "caught unknown exception";
        hpx::report_error(boost::current_exception());
        return 0;
    }

    buffer.data_point_.num_parcels_ = parcels_sent;
    encode_finalize(buffer, arg_size);

    return parcels_sent;
}
// De-serialize a received message buffer and queue every contained parcel
// with the parcel handler.
//
// pp            - parcelport the message arrived on
// buffer        - received data plus its performance-counter record
// chunks        - out-of-band zero-copy chunk data (may be NULL)
// first_message - true while the very first message from a peer is being
//                 handled (only relevant with HPX_HAVE_SECURITY, where the
//                 first message carries the sender's certificate)
//
// Exceptions are reported via hpx::report_error; std::exceptions from the
// serialization layer are repackaged as hpx::exception to avoid slicing.
void decode_message(Parcelport & pp,
    boost::shared_ptr<Buffer> buffer,
    std::vector<util::serialization_chunk> const *chunks,
    bool first_message = false)
{
    // array optimization and data chunking follow the parcelport
    // configuration; chunking requires array optimization
    unsigned archive_flags = boost::archive::no_header;
    if (!pp.allow_array_optimizations()) {
        archive_flags |= util::disable_array_optimization;
        archive_flags |= util::disable_data_chunking;
    }
    else if (!pp.allow_zero_copy_optimizations()) {
        archive_flags |= util::disable_data_chunking;
    }
    boost::uint64_t inbound_data_size = buffer->data_size_;

    // protect from un-handled exceptions bubbling up
    try {
        try {
            // mark start of serialization
            util::high_resolution_timer timer;
            boost::int64_t overall_add_parcel_time = 0;
            performance_counters::parcels::data_point& data =
                buffer->data_point_;

            {
                // De-serialize the parcel data
                util::portable_binary_iarchive archive(buffer->data_,
                    chunks, inbound_data_size, archive_flags);

                // the stream starts with the number of parcels it carries
                std::size_t parcel_count = 0;
                archive >> parcel_count; //-V128
                for(std::size_t i = 0; i != parcel_count; ++i)
                {
#if defined(HPX_HAVE_SECURITY)
                    naming::gid_type parcel_id;
                    if (pp.enable_security())
                    {
                        // handle certificate and verify parcel suffix once
                        first_message = deserialize_certificate(archive,
                            first_message);
                        if (!first_message && i == 0)
                        {
                            verify_message_suffix(buffer->data_,
                                buffer->data_point_, parcel_id);
                        }
                    }

                    // de-serialize parcel and add it to incoming parcel queue
                    parcel p;
                    archive >> p;

                    // verify parcel id, but only once while handling the
                    // first parcel
                    if (pp.enable_security() && !first_message &&
                        i == 0 && parcel_id != p.get_parcel_id())
                    {
                        // again, all hell breaks loose
                        HPX_THROW_EXCEPTION(security_error,
                            "decode_message",
                            "parcel id mismatch");
                        return;
                    }
#else
                    // de-serialize parcel and add it to incoming parcel queue
                    parcel p;
                    archive >> p;
#endif
                    // make sure this parcel ended up on the right locality
                    HPX_ASSERT(p.get_destination_locality() == pp.here());

                    // be sure not to measure add_parcel as serialization time
                    boost::int64_t add_parcel_time =
                        timer.elapsed_nanoseconds();
                    pp.add_received_parcel(p);
                    overall_add_parcel_time +=
                        timer.elapsed_nanoseconds() - add_parcel_time;
                }

                // complete received data with parcel count
                data.num_parcels_ = parcel_count;
                data.raw_bytes_ = archive.bytes_read();
            }

            // store the time required for serialization (total elapsed time
            // minus the time spent queueing the parcels)
            data.serialization_time_ = timer.elapsed_nanoseconds() -
                overall_add_parcel_time;

            pp.add_received_data(data);
        }
        catch (hpx::exception const& e) {
            LPT_(error)
                << "decode_message: caught hpx::exception: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::system::system_error const& e) {
            LPT_(error)
                << "decode_message: caught boost::system::error: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::exception const&) {
            LPT_(error)
                << "decode_message: caught boost::exception.";
            hpx::report_error(boost::current_exception());
        }
        catch (std::exception const& e) {
            // We have to repackage all exceptions thrown by the
            // serialization library as otherwise we will loose the
            // e.what() description of the problem, due to slicing.
            boost::throw_exception(boost::enable_error_info(
                hpx::exception(serialization_error, e.what())));
        }
    }
    catch (...) {
        LPT_(error)
            << "decode_message: caught unknown exception.";
        hpx::report_error(boost::current_exception());
    }
}
/////////////////////////////////////////////////////////////////////////// // Call-back function for parcelHandler to call when new parcels are received void action_manager::fetch_parcel( parcelset::parcelhandler& parcel_handler, naming::gid_type const& parcel_id) { parcelset::parcel p; if (!parcel_handler.get_parcel(p, parcel_id)) return; while (threads::threadmanager_is(starting)) { boost::this_thread::sleep(boost::get_system_time() + boost::posix_time::milliseconds(HPX_NETWORK_RETRIES_SLEEP)); } // Give up if we're shutting down. if (threads::threadmanager_is(stopping)) { LPT_(debug) << "action_manager: fetch_parcel: dropping late " "parcel " << p; return; } // write this parcel to the log LPT_(debug) << "action_manager: fetch_parcel: " << p; // decode the action-type in the parcel continuation_type cont = p.get_continuation(); actions::action_type act = p.get_action(); actions::base_action::action_type acttype = act->get_action_type(); int comptype = act->get_component_type(); naming::locality dest = p.get_destination_locality(); // fetch the set of destinations std::vector<naming::address> const& addrs = p.get_destination_addrs(); // if the parcel carries a continuation it should be directed to a // single destination BOOST_ASSERT(!cont || addrs.size() == 1); // schedule a thread for each of the destinations threads::threadmanager_base& tm = appl_.get_thread_manager(); std::vector<naming::address>::const_iterator end = addrs.end(); for (std::vector<naming::address>::const_iterator it = addrs.begin(); it != end; ++it) { naming::address const& addr = *it; // make sure this parcel destination matches the proper locality BOOST_ASSERT(dest == addr.locality_); // decode the local virtual address of the parcel naming::address::address_type lva = addr.address_; // by convention, a zero address references the local runtime // support component if (0 == lva) lva = appl_.get_runtime_support_raw_gid().get_lsb(); // make sure the component_type of the action matches the 
// component type in the destination address if (HPX_UNLIKELY(!components::types_are_compatible( addr.type_, comptype))) { hpx::util::osstream strm; strm << " types are not compatible: destination_type(" << addr.type_ << ") action_type(" << comptype << ") parcel (" << p << ")"; HPX_THROW_EXCEPTION(bad_component_type, "action_manager::fetch_parcel", hpx::util::osstream_get_string(strm)); } // either directly execute the action or create a new thread if (actions::base_action::direct_action == acttype) { // direct execution of the action if (!cont) { // No continuation is to be executed. act->get_thread_function(lva)(threads::wait_signaled); } else { // This parcel carries a continuation, we execute a wrapper // handling all related functionality. act->get_thread_function(cont, lva)(threads::wait_signaled); } } else { // dispatch action, register work item either with or without // continuation support if (!cont) { // No continuation is to be executed, register the plain // action and the local-virtual address with the TM only. threads::thread_init_data data; tm.register_work( act->get_thread_init_data(lva, data), threads::pending); } else { // This parcel carries a continuation, register a wrapper // which first executes the original thread function as // required by the action and triggers the continuations // afterwards. threads::thread_init_data data; tm.register_work( act->get_thread_init_data(cont, lva, data), threads::pending); } } } }
// De-serialize a received message buffer (with out-of-band zero-copy chunk
// data) and schedule every contained parcel.
//
// pp           - parcelport the message arrived on
// buffer       - received data plus its performance-counter record
// parcel_count - number of parcels in the buffer; 0 means the count is
//                read from the archive itself
// chunks       - zero-copy chunk data referenced by the archive
// num_thread   - worker-thread hint used when scheduling actions
//                (std::size_t(-1) means "no hint")
//
// Parcels whose target object has migrated are re-routed through AGAS; all
// but the last deferred parcel are scheduled on freshly registered HPX
// threads, the last one is scheduled inline on this thread.
void decode_message_with_chunks(
    Parcelport & pp
  , Buffer buffer
  , std::size_t parcel_count
  , std::vector<serialization::serialization_chunk> &chunks
  , std::size_t num_thread = -1
)
{
    std::uint64_t inbound_data_size = buffer.data_size_;

    // protect from un-handled exceptions bubbling up
    try {
        try {
            // mark start of serialization
            util::high_resolution_timer timer;
            std::int64_t overall_add_parcel_time = 0;
            performance_counters::parcels::data_point& data =
                buffer.data_point_;

            {
                std::vector<parcel> deferred_parcels;

                // De-serialize the parcel data
                serialization::input_archive archive(buffer.data_,
                    inbound_data_size, &chunks);

                // a zero count means the archive itself is prefixed with
                // the number of parcels it carries
                if(parcel_count == 0)
                {
                    archive >> parcel_count; //-V128
                    if (parcel_count > 1)
                        deferred_parcels.reserve(parcel_count);
                }

                for(std::size_t i = 0; i != parcel_count; ++i)
                {
                    // defer scheduling for every parcel but the last one
                    bool deferred_schedule = true;
                    if (i == parcel_count - 1)
                        deferred_schedule = false;

#if defined(HPX_HAVE_PARCELPORT_ACTION_COUNTERS)
                    std::size_t archive_pos = archive.current_pos();
                    std::int64_t serialize_time =
                        timer.elapsed_nanoseconds();
#endif
                    // de-serialize parcel and add it to incoming parcel queue
                    parcel p;

                    // deferred_schedule will be set to false if it was previously
                    // set to true and the action to be scheduled is direct.
                    bool migrated = p.load_schedule(
                        archive, num_thread, deferred_schedule);

                    std::int64_t add_parcel_time =
                        timer.elapsed_nanoseconds();

#if defined(HPX_HAVE_PARCELPORT_ACTION_COUNTERS)
                    // record per-action parcel statistics
                    performance_counters::parcels::data_point action_data;
                    action_data.bytes_ = archive.current_pos() - archive_pos;
                    action_data.serialization_time_ =
                        add_parcel_time - serialize_time;
                    action_data.num_parcels_ = 1;
                    pp.add_received_data(
                        p.get_action()->get_action_name(), action_data);
#endif
                    // make sure this parcel ended up on the right locality
                    naming::gid_type const& here = hpx::get_locality();
                    if (hpx::get_runtime_ptr() && here &&
                        (naming::get_locality_id_from_gid(
                            p.destination_locality()) !=
                         naming::get_locality_id_from_gid(here)))
                    {
                        std::ostringstream os;
                        os << "parcel destination does not match "
                              "locality which received the parcel ("
                           << here << "), " << p;
                        HPX_THROW_EXCEPTION(invalid_status,
                            "hpx::parcelset::decode_message",
                            os.str());
                        return;
                    }

                    if (migrated)
                    {
                        // target object has moved: re-route through AGAS
                        naming::resolver_client& client =
                            hpx::naming::get_agas_client();
                        client.route(
                            std::move(p),
                            &detail::parcel_route_handler,
                            threads::thread_priority_normal);
                    }
                    else if (deferred_schedule)
                        deferred_parcels.push_back(std::move(p));

                    // be sure not to measure add_parcel as serialization time
                    overall_add_parcel_time +=
                        timer.elapsed_nanoseconds() - add_parcel_time;
                }

                // complete received data with parcel count
                data.num_parcels_ = parcel_count;
                data.raw_bytes_ = archive.bytes_read();

                for (std::size_t i = 0; i != deferred_parcels.size(); ++i)
                {
                    // If we are the last deferred parcel, we don't need to spin
                    // a new thread...
                    if (i == deferred_parcels.size() - 1)
                    {
                        deferred_parcels[i].schedule_action(num_thread);
                    }
                    // ... otherwise, schedule the parcel on a new thread.
                    else
                    {
                        hpx::applier::register_thread_nullary(
                            util::bind(
                                util::one_shot(
                                    [num_thread](parcel&& p)
                                    {
                                        p.schedule_action(num_thread);
                                    }
                                ), std::move(deferred_parcels[i])),
                            "schedule_parcel",
                            threads::pending, true,
                            threads::thread_priority_critical,
                            num_thread, threads::thread_stacksize_default);
                    }
                }
            }

            // store the time required for serialization (total elapsed time
            // minus the time spent scheduling the parcels)
            data.serialization_time_ = timer.elapsed_nanoseconds() -
                overall_add_parcel_time;

            pp.add_received_data(data);
        }
        catch (hpx::exception const& e) {
            LPT_(error)
                << "decode_message: caught hpx::exception: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::system::system_error const& e) {
            LPT_(error)
                << "decode_message: caught boost::system::error: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::exception const&) {
            LPT_(error)
                << "decode_message: caught boost::exception.";
            hpx::report_error(boost::current_exception());
        }
        catch (std::exception const& e) {
            // We have to repackage all exceptions thrown by the
            // serialization library as otherwise we will loose the
            // e.what() description of the problem, due to slicing.
            boost::throw_exception(boost::enable_error_info(
                hpx::exception(serialization_error, e.what())));
        }
    }
    catch (...) {
        LPT_(error)
            << "decode_message: caught unknown exception.";
        hpx::report_error(boost::current_exception());
    }
}