void decode_parcels(Parcelport & parcelport, Connection & connection, boost::shared_ptr<Buffer> buffer) { typedef typename Buffer::transmission_chunk_type transmission_chunk_type; // add parcel data to incoming parcel queue std::size_t num_zero_copy_chunks = static_cast<std::size_t>( static_cast<boost::uint32_t>(buffer->num_chunks_.first)); // boost::shared_ptr<std::vector<std::vector<char> > > in_chunks(in_chunks_); boost::shared_ptr<std::vector<util::serialization_chunk> > chunks; if (num_zero_copy_chunks != 0) { // decode chunk information chunks = boost::make_shared<std::vector<util::serialization_chunk> >(); std::size_t num_non_zero_copy_chunks = static_cast<std::size_t>( static_cast<boost::uint32_t>(buffer->num_chunks_.second)); chunks->resize(num_zero_copy_chunks + num_non_zero_copy_chunks); // place the zero-copy chunks at their spots first for (std::size_t i = 0; i != num_zero_copy_chunks; ++i) { transmission_chunk_type& c = buffer->transmission_chunks_[i]; boost::uint64_t first = c.first, second = c.second; HPX_ASSERT(buffer->chunks_[i].size() == second); (*chunks)[first] = util::create_pointer_chunk( buffer->chunks_[i].data(), second); } std::size_t index = 0; for (std::size_t i = num_zero_copy_chunks; i != num_zero_copy_chunks + num_non_zero_copy_chunks; ++i) { transmission_chunk_type& c = buffer->transmission_chunks_[i]; boost::uint64_t first = c.first, second = c.second; // find next free entry while ((*chunks)[index].size_ != 0) ++index; // place the index based chunk at the right spot (*chunks)[index] = util::create_index_chunk(first, second); ++index; } HPX_ASSERT(index == num_zero_copy_chunks + num_non_zero_copy_chunks); } bool first_message = false; #if defined(HPX_HAVE_SECURITY) if(connection.first_message_) { connection.first_message_ = false; first_message = true; } #endif HPX_ASSERT(!buffer->parcels_decoded_); if(hpx::is_running() && parcelport.async_serialization()) { hpx::applier::register_thread_nullary( util::bind( 
util::one_shot(&decode_parcels_impl<Parcelport, Buffer>), boost::ref(parcelport), buffer, chunks, first_message), "decode_parcels", threads::pending, true, threads::thread_priority_boost); } else { decode_parcels_impl(parcelport, buffer, chunks, first_message); } }
// De-serialize all parcels contained in the given receive buffer and hand
// each one to the parcelport's incoming parcel queue, recording performance
// counter data (parcel count, raw bytes, serialization time) on the way.
// All exceptions are reported via hpx::report_error instead of propagating
// to the caller.
//
//   pp            parcelport the decoded parcels are delivered to
//   buffer        receive buffer holding the serialized parcel data
//   chunks        optional zero-copy chunk table (may be a null pointer)
//   first_message security builds only: true if this is the first message
//                 received over the connection (certificate not yet handled)
void decode_message(Parcelport & pp,
    boost::shared_ptr<Buffer> buffer,
    std::vector<util::serialization_chunk> const *chunks,
    bool first_message = false)
{
    // translate the parcelport's optimization settings into archive flags
    unsigned archive_flags = boost::archive::no_header;
    if (!pp.allow_array_optimizations()) {
        // no array optimization implies no data chunking either
        archive_flags |= util::disable_array_optimization;
        archive_flags |= util::disable_data_chunking;
    }
    else if (!pp.allow_zero_copy_optimizations()) {
        // arrays may be optimized, but zero-copy chunking is disabled
        archive_flags |= util::disable_data_chunking;
    }

    boost::uint64_t inbound_data_size = buffer->data_size_;

    // protect from un-handled exceptions bubbling up
    try {
        try {
            // mark start of serialization
            util::high_resolution_timer timer;
            boost::int64_t overall_add_parcel_time = 0;
            performance_counters::parcels::data_point& data =
                buffer->data_point_;

            // inner scope so the archive is destroyed before the final
            // counter values are computed below
            {
                // De-serialize the parcel data
                util::portable_binary_iarchive archive(buffer->data_,
                    chunks, inbound_data_size, archive_flags);

                std::size_t parcel_count = 0;
                archive >> parcel_count; //-V128
                for(std::size_t i = 0; i != parcel_count; ++i)
                {
#if defined(HPX_HAVE_SECURITY)
                    naming::gid_type parcel_id;
                    if (pp.enable_security())
                    {
                        // handle certificate and verify parcel suffix once
                        first_message = deserialize_certificate(archive,
                            first_message);
                        if (!first_message && i == 0)
                        {
                            // fills parcel_id from the message suffix
                            verify_message_suffix(buffer->data_,
                                buffer->data_point_, parcel_id);
                        }
                    }

                    // de-serialize parcel and add it to incoming parcel queue
                    parcel p;
                    archive >> p;

                    // verify parcel id, but only once while handling the
                    // first parcel
                    if (pp.enable_security() && !first_message && i == 0 &&
                        parcel_id != p.get_parcel_id())
                    {
                        // again, all hell breaks loose
                        HPX_THROW_EXCEPTION(security_error,
                            "decode_message",
                            "parcel id mismatch");
                        return;  // defensive; HPX_THROW_EXCEPTION throws
                    }
#else
                    // de-serialize parcel and add it to incoming parcel queue
                    parcel p;
                    archive >> p;
#endif
                    // make sure this parcel ended up on the right locality
                    HPX_ASSERT(p.get_destination_locality() == pp.here());

                    // be sure not to measure add_parcel as serialization time
                    boost::int64_t add_parcel_time =
                        timer.elapsed_nanoseconds();
                    pp.add_received_parcel(p);
                    overall_add_parcel_time += timer.elapsed_nanoseconds() -
                        add_parcel_time;
                }

                // complete received data with parcel count
                data.num_parcels_ = parcel_count;
                data.raw_bytes_ = archive.bytes_read();
            }

            // store the time required for serialization (total elapsed time
            // minus the time spent delivering parcels to the queue)
            data.serialization_time_ = timer.elapsed_nanoseconds() -
                overall_add_parcel_time;

            pp.add_received_data(data);
        }
        catch (hpx::exception const& e) {
            LPT_(error)
                << "decode_message: caught hpx::exception: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::system::system_error const& e) {
            LPT_(error)
                << "decode_message: caught boost::system::error: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::exception const&) {
            LPT_(error)
                << "decode_message: caught boost::exception.";
            hpx::report_error(boost::current_exception());
        }
        catch (std::exception const& e) {
            // We have to repackage all exceptions thrown by the
            // serialization library as otherwise we will lose the
            // e.what() description of the problem, due to slicing.
            // The repackaged exception is caught by the outer handler below.
            boost::throw_exception(boost::enable_error_info(
                hpx::exception(serialization_error, e.what())));
        }
    }
    catch (...) {
        LPT_(error)
            << "decode_message: caught unknown exception.";
        hpx::report_error(boost::current_exception());
    }
}
// De-serialize parcels from the given buffer using an explicit chunk table
// and schedule their actions. Scheduling of all but the last parcel is
// deferred so the batch is deserialized first; migrated parcels are routed
// through AGAS instead. Performance counter data is recorded as in
// decode_message.
//
//   pp           parcelport receiving the data (counter reporting)
//   buffer       receive buffer (taken by value)
//   parcel_count number of parcels, or 0 to read the count from the archive
//   chunks       zero-copy chunk table backing the archive
//   num_thread   worker thread hint; default -1 wraps to the largest
//                std::size_t value (presumably "no specific thread" --
//                confirm against callers)
void decode_message_with_chunks(
    Parcelport & pp
  , Buffer buffer
  , std::size_t parcel_count
  , std::vector<serialization::serialization_chunk> &chunks
  , std::size_t num_thread = -1
)
{
    std::uint64_t inbound_data_size = buffer.data_size_;

    // protect from un-handled exceptions bubbling up
    try {
        try {
            // mark start of serialization
            util::high_resolution_timer timer;
            std::int64_t overall_add_parcel_time = 0;
            performance_counters::parcels::data_point& data =
                buffer.data_point_;

            {
                std::vector<parcel> deferred_parcels;

                // De-serialize the parcel data
                serialization::input_archive archive(buffer.data_,
                    inbound_data_size, &chunks);

                // a zero parcel_count means the count is embedded in the
                // data stream itself
                if(parcel_count == 0)
                {
                    archive >> parcel_count; //-V128
                    if (parcel_count > 1)
                        deferred_parcels.reserve(parcel_count);
                }

                for(std::size_t i = 0; i != parcel_count; ++i)
                {
                    // the last parcel is always scheduled directly (no
                    // deferral needed, see the loop over deferred_parcels)
                    bool deferred_schedule = true;
                    if (i == parcel_count - 1)
                        deferred_schedule = false;

#if defined(HPX_HAVE_PARCELPORT_ACTION_COUNTERS)
                    // remember archive position/time to attribute bytes and
                    // time to this individual action
                    std::size_t archive_pos = archive.current_pos();
                    std::int64_t serialize_time = timer.elapsed_nanoseconds();
#endif
                    // de-serialize parcel and add it to incoming parcel queue
                    parcel p;

                    // deferred_schedule will be set to false if it was previously
                    // set to true and the action to be scheduled is direct.
                    bool migrated = p.load_schedule(archive, num_thread,
                        deferred_schedule);

                    std::int64_t add_parcel_time = timer.elapsed_nanoseconds();

#if defined(HPX_HAVE_PARCELPORT_ACTION_COUNTERS)
                    // report per-action counter data for this parcel
                    performance_counters::parcels::data_point action_data;
                    action_data.bytes_ = archive.current_pos() - archive_pos;
                    action_data.serialization_time_ =
                        add_parcel_time - serialize_time;
                    action_data.num_parcels_ = 1;
                    pp.add_received_data(p.get_action()->get_action_name(),
                        action_data);
#endif
                    // make sure this parcel ended up on the right locality
                    naming::gid_type const& here = hpx::get_locality();
                    if (hpx::get_runtime_ptr() && here &&
                        (naming::get_locality_id_from_gid(
                            p.destination_locality()) !=
                         naming::get_locality_id_from_gid(here)))
                    {
                        std::ostringstream os;
                        os << "parcel destination does not match "
                              "locality which received the parcel ("
                           << here << "), " << p;
                        HPX_THROW_EXCEPTION(invalid_status,
                            "hpx::parcelset::decode_message",
                            os.str());
                        return;  // defensive; HPX_THROW_EXCEPTION throws
                    }

                    if (migrated)
                    {
                        // target object has migrated: re-route the parcel
                        // through AGAS instead of executing it here
                        naming::resolver_client& client =
                            hpx::naming::get_agas_client();
                        client.route(
                            std::move(p),
                            &detail::parcel_route_handler,
                            threads::thread_priority_normal);
                    }
                    else if (deferred_schedule)
                        deferred_parcels.push_back(std::move(p));

                    // be sure not to measure add_parcel as serialization time
                    overall_add_parcel_time += timer.elapsed_nanoseconds() -
                        add_parcel_time;
                }

                // complete received data with parcel count
                data.num_parcels_ = parcel_count;
                data.raw_bytes_ = archive.bytes_read();

                // schedule the deferred parcels now that deserialization of
                // the whole batch is finished
                for (std::size_t i = 0; i != deferred_parcels.size(); ++i)
                {
                    // If we are the last deferred parcel, we don't need to spin
                    // a new thread...
                    if (i == deferred_parcels.size() - 1)
                    {
                        deferred_parcels[i].schedule_action(num_thread);
                    }
                    // ... otherwise, schedule the parcel on a new thread.
                    else
                    {
                        hpx::applier::register_thread_nullary(
                            util::bind(
                                util::one_shot(
                                    [num_thread](parcel&& p)
                                    {
                                        p.schedule_action(num_thread);
                                    }
                                ), std::move(deferred_parcels[i])),
                            "schedule_parcel",
                            threads::pending, true,
                            threads::thread_priority_critical,
                            num_thread, threads::thread_stacksize_default);
                    }
                }
            }

            // store the time required for serialization (total elapsed time
            // minus the time spent scheduling/routing parcels)
            data.serialization_time_ = timer.elapsed_nanoseconds() -
                overall_add_parcel_time;

            pp.add_received_data(data);
        }
        // NOTE(review): the log prefix below says "decode_message" although
        // this function is decode_message_with_chunks -- kept as-is since
        // these are runtime strings.
        catch (hpx::exception const& e) {
            LPT_(error)
                << "decode_message: caught hpx::exception: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::system::system_error const& e) {
            LPT_(error)
                << "decode_message: caught boost::system::error: "
                << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::exception const&) {
            LPT_(error)
                << "decode_message: caught boost::exception.";
            hpx::report_error(boost::current_exception());
        }
        catch (std::exception const& e) {
            // We have to repackage all exceptions thrown by the
            // serialization library as otherwise we will lose the
            // e.what() description of the problem, due to slicing.
            boost::throw_exception(boost::enable_error_info(
                hpx::exception(serialization_error, e.what())));
        }
    }