Code example #1
        /// Query the current value of the accumulator.
        ///
        /// \note This function is fully synchronous.
        double query_sync()
        {
            HPX_ASSERT(this->get_id());

            return query_async().get();
        }
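The doc comment above shows the usual idiom behind query_sync(): the synchronous call is just the asynchronous one followed by .get() on the returned future. Below is a minimal, generic sketch of that pattern using std::async as a stand-in; the counter type and its members are illustrative and not part of HPX.

 #include <future>
 #include <iostream>

 // Generic stand-in for the query_sync()/query_async() pair shown above:
 // the synchronous call simply blocks on the future returned by the
 // asynchronous one. (Illustrative only; not HPX code.)
 struct counter
 {
     std::future<double> query_async() const
     {
         return std::async(std::launch::async, [v = value_] { return v; });
     }

     double query_sync() const
     {
         return query_async().get();   // fully synchronous: wait for the result
     }

     double value_ = 42.0;
 };

 int main()
 {
     counter c;
     std::cout << c.query_sync() << '\n';   // prints 42
 }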
Code example #2
File: output_archive.hpp Project: akemp/hpx
 void save(bool b)
 {
     HPX_ASSERT(0 == static_cast<int>(b) || 1 == static_cast<int>(b));
     save_binary(&b, sizeof(bool));
 }
Code example #3
File: message_buffer.hpp Project: 41i/hpx
 bool empty() const
 {
     HPX_ASSERT(messages_.size() == handlers_.size());
     return messages_.empty();
 }
Code example #4
File: self.hpp Project: dmarce1/hpx
 tss_storage* get_or_create_thread_tss_data()
 {
     HPX_ASSERT(m_pimpl);
     return m_pimpl->get_thread_tss_data(true);
 }
Code example #5
File: locality.hpp Project: ddemidov/hpx
 Impl & get()
 {
     HPX_ASSERT(Impl::get_type() == get_type());
     return static_cast<impl<Impl>*>(this)->impl_;
 }
Code example #6
File: self.hpp Project: dmarce1/hpx
 thread_id_repr_type get_thread_id() const
 {
   HPX_ASSERT(m_pimpl);
   return m_pimpl->get_thread_id();
 }
Code example #7
File: self.hpp Project: dmarce1/hpx
 std::size_t set_thread_data(std::size_t data)
 {
     HPX_ASSERT(m_pimpl);
     return m_pimpl->set_thread_data(data);
 }
Code example #8
 jacobi::row stencil_iterator::get(std::size_t idx)
 {
     HPX_ASSERT(rows[idx].id);
     return rows[idx];
 }
Code example #9
File: transpose_block.cpp Project: ltroska/hpx
 double operator[](std::size_t i) const
 {
     HPX_ASSERT(data_);
     return data_[i];
 }
Code example #10
File: memory_block.hpp Project: AntonBikineev/hpx
 ~memory_block_header()
 {
     // invoke destructor, if needed
     HPX_ASSERT(this->managing_object_.destruct());
     this->managing_object_.destruct()(this->get_ptr());
 }
Code example #11
 void set_parcelhandler(parcelhandler* ph)
 {
     HPX_ASSERT(ph_ == 0);
     ph_ = ph;
 }
Code example #12
 ~prepare_main_thread() noexcept
 {
     BOOL result = ConvertFiberToThread();
     HPX_ASSERT(FALSE != result);
     HPX_UNUSED(result);
 }
Code example #13
 prepare_main_thread() noexcept
 {
     LPVOID result = ConvertThreadToFiber(nullptr);
     HPX_ASSERT(nullptr != result);
     HPX_UNUSED(result);
 }
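Together, the two snippets above form an RAII pair around the Windows fiber API: the constructor turns the calling thread into a fiber (ConvertThreadToFiber) and the destructor converts it back (ConvertFiberToThread). A minimal, self-contained sketch of the same idea follows; it is Windows-only, and the scoped_fiber_guard name is illustrative rather than HPX's.

 #include <windows.h>
 #include <cassert>

 // Scoped guard mirroring prepare_main_thread above: the calling thread is a
 // fiber for the lifetime of the guard. (Illustrative only; not HPX code.)
 struct scoped_fiber_guard
 {
     scoped_fiber_guard()
     {
         LPVOID result = ConvertThreadToFiber(nullptr);
         assert(nullptr != result);
         (void) result;
     }

     ~scoped_fiber_guard()
     {
         BOOL result = ConvertFiberToThread();
         assert(FALSE != result);
         (void) result;
     }
 };

 int main()
 {
     scoped_fiber_guard guard;   // thread becomes a fiber here
     // ... create and switch between fibers on this thread ...
 }                               // and is converted back on scope exit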
Code example #14
 HPX_FORCEINLINE VOID CALLBACK trampoline(LPVOID pv)
 {
     T* fun = static_cast<T*>(pv);
     HPX_ASSERT(fun);
     (*fun)();
 }
Code example #15
File: thread_pool.cpp Project: wzugang/hpx
 hpx::state thread_pool<Scheduler>::get_state(std::size_t num_thread) const
 {
     HPX_ASSERT(num_thread != std::size_t(-1));
     return sched_.get_state(num_thread).load();
 }
Code example #16
File: transpose_block.cpp Project: ltroska/hpx
 double & operator[](std::size_t i)
 {
     HPX_ASSERT(data_);
     HPX_ASSERT(mode_ == reference);
     return data_[i];
 }
Code example #17
File: self.hpp Project: dmarce1/hpx
 bool pending() const
 {
   HPX_ASSERT(m_pimpl);
   return m_pimpl->pending();
 }
Code example #18
File: capability.hpp Project: devangb/hpx
        bool test(std::size_t position) const
        {
            HPX_ASSERT(position < security::traits::capability<>::size);

            return (bits_[position / CHAR_BIT] & (1ull << (position % CHAR_BIT))) != 0;
        }
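test() above indexes into a raw byte array: position / CHAR_BIT selects the byte and position % CHAR_BIT the bit within it. A minimal stand-alone sketch of the same indexing follows; the test_bit function and its byte array are illustrative, not HPX code.

 #include <cassert>
 #include <climits>   // CHAR_BIT
 #include <cstddef>
 #include <cstdint>

 // Same byte/bit arithmetic as capability::test above, on a plain byte array.
 bool test_bit(std::uint8_t const* bits, std::size_t position)
 {
     return (bits[position / CHAR_BIT] & (1u << (position % CHAR_BIT))) != 0;
 }

 int main()
 {
     std::uint8_t bits[2] = { 0x01, 0x80 };   // bits 0 and 15 set
     assert(test_bit(bits, 0));
     assert(test_bit(bits, 15));
     assert(!test_bit(bits, 3));
 }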
Code example #19
File: self.hpp Project: dmarce1/hpx
 std::size_t get_thread_data() const
 {
     HPX_ASSERT(m_pimpl);
     return m_pimpl->get_thread_data();
 }
Code example #20
File: applier.cpp Project: parsa/hpx
    // schedule threads based on given parcel
    void applier::schedule_action(parcelset::parcel p, std::size_t num_thread)
    {
        // fetch the set of destinations
#if !defined(HPX_SUPPORT_MULTIPLE_PARCEL_DESTINATIONS)
        std::size_t const size = 1ul;
#else
        std::size_t const size = p.size();
#endif
        naming::id_type const* ids = p.destinations();
        naming::address const* addrs = p.addrs();

        // decode the action-type in the parcel
        std::unique_ptr<actions::continuation> cont = p.get_continuation();
        actions::base_action * act = p.get_action();

#if defined(HPX_HAVE_SECURITY)
        // we look up the certificate of the originating locality, no matter
        // whether this parcel was routed through another locality or not
        boost::uint32_t locality_id =
            naming::get_locality_id_from_gid(p.get_parcel_id());
        error_code ec(lightweight);
        components::security::signed_certificate const& cert =
            get_locality_certificate(locality_id, ec);

        if (verify_capabilities_ && ec) {
            // we should have received the sender's certificate by now
            HPX_THROW_EXCEPTION(security_error,
                "applier::schedule_action",
                boost::str(boost::format("couldn't extract sender's "
                    "certificate (sender locality id: %1%)") % locality_id));
            return;
        }

        components::security::capability caps_sender;
        if (verify_capabilities_)
            caps_sender = cert.get_type().get_capability();
#endif
        int comptype = act->get_component_type();
        naming::gid_type dest = p.destination_locality();

        // if the parcel carries a continuation it should be directed to a
        // single destination
        HPX_ASSERT(!cont || size == 1);

        naming::resolver_client& client = hpx::naming::get_agas_client();

        // schedule a thread for each of the destinations
        for (std::size_t i = 0; i != size; ++i)
        {
            naming::address const& addr = addrs[i];

            // make sure this parcel destination matches the proper locality
            HPX_ASSERT(dest == addr.locality_);

            // decode the local virtual address of the parcel
            naming::address::address_type lva = addr.address_;

            // by convention, a zero address references either the local
            // runtime support component or one of the AGAS components
            if (0 == lva)
            {
                switch(comptype)
                {
                case components::component_runtime_support:
                    lva = get_runtime_support_raw_gid().get_lsb();
                    break;

                case components::component_agas_primary_namespace:
                    lva = get_agas_client().get_primary_ns_lva();
                    break;

                case components::component_agas_symbol_namespace:
                    lva = get_agas_client().get_symbol_ns_lva();
                    break;

                case components::component_plain_function:
                    break;

                default:
                    HPX_ASSERT(false);
                }
            }
            else if (comptype == components::component_memory)
            {
                HPX_ASSERT(naming::refers_to_virtual_memory(ids[i].get_gid()));
                lva = get_memory_raw_gid().get_lsb();
            }

            // make sure the target has not been migrated away
            auto r = act->was_object_migrated(ids[i], lva);
            if (r.first)
            {
#if defined(HPX_SUPPORT_MULTIPLE_PARCEL_DESTINATIONS)
                // it's unclear at this point what could be done if there is
                // more than one destination
                HPX_ASSERT(size == 1);
#endif
                // set continuation in outgoing parcel
                if (cont)
                    p.set_continuation(std::move(cont));

                // route parcel to new locality of target
                client.route(
                    std::move(p),
                    util::bind(&detail::parcel_sent_handler,
                        boost::ref(parcel_handler_),
                        util::placeholders::_1, util::placeholders::_2),
                    threads::thread_priority_normal);
                break;
            }

#if defined(HPX_HAVE_SECURITY)
            if (verify_capabilities_) {
                components::security::capability caps_action =
                    act->get_required_capabilities(lva);

                if (caps_action.verify(caps_sender) == false) {
                    HPX_THROW_EXCEPTION(security_error,
                        "applier::schedule_action",
                        boost::str(boost::format("sender has insufficient capabilities "
                            "to execute the action (%1%, sender: %2%, action %3%)") %
                            act->get_action_name() % caps_sender % caps_action));
                    return;
                }
            }
#endif
            // make sure the component_type of the action matches the
            // component type in the destination address
            if (HPX_UNLIKELY(!components::types_are_compatible(
                addr.type_, comptype)))
            {
                std::ostringstream strm;
                strm << " types are not compatible: destination_type("
                      << addr.type_ << ") action_type(" << comptype
                      << ") parcel ("  << p << ")";
                HPX_THROW_EXCEPTION(bad_component_type,
                    "applier::schedule_action",
                    strm.str());
            }

            // dispatch action, register work item either with or without
            // continuation support
            if (!cont) {
                // No continuation is to be executed, register the plain
                // action and the local-virtual address.
                act->schedule_thread(ids[i], lva, threads::pending, num_thread);
            }
            else {
                // This parcel carries a continuation, register a wrapper
                // which first executes the original thread function as
                // required by the action and triggers the continuations
                // afterwards.
                act->schedule_thread(std::move(cont), ids[i], lva,
                    threads::pending, num_thread);
            }
        }
    }
Code example #21
File: self.hpp Project: dmarce1/hpx
 tss_storage* get_thread_tss_data()
 {
     HPX_ASSERT(m_pimpl);
     return m_pimpl->get_thread_tss_data(false);
 }
Code example #22
File: applier.cpp Project: parsa/hpx
 applier& get_applier()
 {
     // should have been initialized
     HPX_ASSERT(NULL != applier::applier_.get());
     return **applier::applier_;
 }
Code example #23
File: base_lco_with_value.hpp Project: Bcorde5/hpx
 static void set(components::component_type)
 {
     HPX_ASSERT(false);
 }
Code example #24
File: latch.hpp Project: HadrienG2/hpx
 /// Requires: No threads are blocked at the synchronization point.
 ///
 /// \note May be called even if some threads have not yet returned
 ///       from wait() or count_down_and_wait(), provided that counter_
 ///       is 0.
 /// \note The destructor might not return until all threads have exited
 ///       wait() or count_down_and_wait().
 /// \note It is the caller's responsibility to ensure that no other
 ///       thread enters wait() after one thread has called the
 ///       destructor. This may require additional coordination.
 ~latch ()
 {
     boost::unique_lock<mutex_type> l(mtx_);
     HPX_ASSERT(counter_ == 0);
 }
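The notes above are easier to read with the latch's public interface in mind: the destructor may run as soon as the internal counter has reached zero, even if waiters have not yet returned. A minimal usage sketch follows; the header path, the hpx::lcos::local::latch name, and count_down_and_wait() are assumptions about this HPX revision and may differ.

 #include <hpx/hpx_main.hpp>
 #include <hpx/include/async.hpp>
 #include <hpx/lcos/local/latch.hpp>   // assumed header path for this revision
 #include <cstddef>

 int main()
 {
     std::size_t const num_workers = 4;

     // one count per worker plus one for this thread
     hpx::lcos::local::latch l(num_workers + 1);

     for (std::size_t i = 0; i != num_workers; ++i)
         hpx::async([&l] { l.count_down_and_wait(); });

     // arrive ourselves; counter_ reaches 0 before ~latch() runs below
     l.count_down_and_wait();
     return 0;
 }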
Code example #25
File: locality.hpp Project: ddemidov/hpx
 Impl const & get() const
 {
     HPX_ASSERT(Impl::get_type() == get_type());
     return static_cast<const impl<Impl>*>(this)->impl_;
 }
Code example #26
File: thread_pool.cpp Project: wzugang/hpx
 boost::thread& thread_pool<Scheduler>::get_os_thread_handle(
     std::size_t num_thread)
 {
     HPX_ASSERT(num_thread < threads_.size());
     return threads_[threads_.size() - num_thread - 1];
 }
Code example #27
File: message_buffer.hpp Project: 41i/hpx
 std::size_t size() const
 {
     HPX_ASSERT(messages_.size() == handlers_.size());
     return messages_.size();
 }
Code example #28
File: thread_pool.cpp Project: wzugang/hpx
    bool thread_pool<Scheduler>::run(boost::unique_lock<boost::mutex>& l,
        std::size_t num_threads)
    {
        HPX_ASSERT(l.owns_lock());

        LTM_(info) //-V128
            << "thread_pool::run: " << pool_name_
            << " number of processing units available: " //-V128
            << threads::hardware_concurrency();
        LTM_(info) //-V128
            << "thread_pool::run: " << pool_name_
            << " creating " << num_threads << " OS thread(s)"; //-V128

        if (0 == num_threads) {
            HPX_THROW_EXCEPTION(bad_parameter,
                "thread_pool::run", "number of threads is zero");
        }

#if defined(HPX_HAVE_THREAD_CUMULATIVE_COUNTS) && \
    defined(HPX_HAVE_THREAD_IDLE_RATES)
        // scale timestamps to nanoseconds
        boost::uint64_t base_timestamp = util::hardware::timestamp();
        boost::uint64_t base_time = util::high_resolution_clock::now();
        boost::uint64_t curr_timestamp = util::hardware::timestamp();
        boost::uint64_t curr_time = util::high_resolution_clock::now();

        while ((curr_time - base_time) <= 100000)
        {
            curr_timestamp = util::hardware::timestamp();
            curr_time = util::high_resolution_clock::now();
        }

        if (curr_timestamp - base_timestamp != 0)
        {
            timestamp_scale_ = double(curr_time - base_time) /
                double(curr_timestamp - base_timestamp);
        }

        LTM_(info)
            << "thread_pool::run: " << pool_name_
            << " timestamp_scale: " << timestamp_scale_; //-V128
#endif

        if (!threads_.empty() || sched_.has_reached_state(state_running))
            return true;    // do nothing if already running

        executed_threads_.resize(num_threads);
        executed_thread_phases_.resize(num_threads);
        tfunc_times_.resize(num_threads);
        exec_times_.resize(num_threads);

        try {
            HPX_ASSERT(startup_.get() == 0);
            startup_.reset(
                new boost::barrier(static_cast<unsigned>(num_threads+1))
            );

            // run threads and wait for initialization to complete
            sched_.set_all_states(state_running);

            topology const& topology_ = get_topology();

            std::size_t thread_num = num_threads;
            while (thread_num-- != 0) {
                threads::mask_cref_type mask =
                    sched_.Scheduler::get_pu_mask(topology_, thread_num);

                LTM_(info) //-V128
                    << "thread_pool::run: " << pool_name_
                    << " create OS thread " << thread_num //-V128
                    << ": will run on processing units within this mask: "
#if !defined(HPX_WITH_MORE_THAN_64_THREADS) || \
    (defined(HPX_HAVE_MAX_CPU_COUNT) && HPX_HAVE_MAX_CPU_COUNT <= 64)
                    << std::hex << "0x" << mask;
#else
                    << "0b" << mask;
#endif

                // create a new thread
                threads_.push_back(new boost::thread(
                        util::bind(&thread_pool::thread_func, this, thread_num,
                            boost::ref(topology_), boost::ref(*startup_))
                    ));

                // set the new threads affinity (on Windows systems)
                if (any(mask))
                {
                    error_code ec(lightweight);
                    topology_.set_thread_affinity_mask(threads_.back(), mask, ec);
                    if (ec)
                    {
                        LTM_(warning) //-V128
                            << "thread_pool::run: " << pool_name_
                            << " setting thread affinity on OS thread " //-V128
                            << thread_num << " failed with: "
                            << ec.get_message();
                    }
                }
                else
                {
                    LTM_(debug) //-V128
                        << "thread_pool::run: " << pool_name_
                        << " setting thread affinity on OS thread " //-V128
                        << thread_num << " was explicitly disabled.";
                }
            }

            // the main thread needs to have a unique thread_num
            init_tss(num_threads);
            startup_->wait();
        }
Code example #29
 void load(bool & b)
 {
     load_binary(&b, sizeof(bool));
     HPX_ASSERT(0 == static_cast<int>(b) || 1 == static_cast<int>(b));
 }
Code example #30
 /// Erase all values with the given key from the partition_unordered_map
 /// container.
 ///
 /// \param key  Key of the element in the partition_unordered_map
 ///
 /// \return This returns the hpx::future containing the number of
 ///         elements erased
 ///
 future<std::size_t> erase(Key const& key)
 {
     HPX_ASSERT(this->get_gid());
     return hpx::async<typename server_type::erase_action>(
         this->get_gid(), key);
 }