Example #1
    ///////////////////////////////////////////////////////////////////////////
    /// This thread function is used by the at_timer thread below to trigger
    /// the required action.
    inline thread_state_enum wake_timer_thread(
        thread_id_type const& thrd, thread_state_enum newstate,
        thread_state_ex_enum newstate_ex, thread_priority priority,
        thread_id_type const& timer_id,
        boost::shared_ptr<boost::atomic<bool> > const& triggered)
    {
        if (HPX_UNLIKELY(!thrd)) {
            HPX_THROW_EXCEPTION(null_thread_id,
                "threads::detail::wake_timer_thread",
                "NULL thread id encountered (id)");
            return terminated;
        }
        if (HPX_UNLIKELY(!timer_id)) {
            HPX_THROW_EXCEPTION(null_thread_id,
                "threads::detail::wake_timer_thread",
                "NULL thread id encountered (timer_id)");
            return terminated;
        }

        bool oldvalue = false;
        if (triggered->compare_exchange_strong(oldvalue, true)) //-V601
        {
            // timer has not been canceled yet, trigger the requested set_state
            detail::set_thread_state(thrd, newstate, newstate_ex, priority);
        }

        // then re-activate the thread holding the deadline_timer
        // REVIEW: Why do we ignore errors here?
        error_code ec(lightweight);    // do not throw
        detail::set_thread_state(timer_id, pending, wait_timeout,
            thread_priority_normal, std::size_t(-1), ec);
        return terminated;
    }
Example #2
        void add_heap(heap_type* p)
        {
            if (HPX_UNLIKELY(!p))
            {
                HPX_THROW_EXCEPTION(bad_parameter,
                    name() + "::add_heap", "encountered NULL heap");
            }

            unique_lock_type ul(mtx_);
#if defined(HPX_DEBUG)
            p->heap_count_ = heap_count_;
#endif

            iterator it = heap_list_.insert(heap_list_.begin(),
                typename list_type::value_type(p));

            // Check for insertion failure.
            if (HPX_UNLIKELY(it == heap_list_.end()))
            {
                HPX_THROW_EXCEPTION(out_of_memory,
                    name() + "::add_heap",
                    boost::str(boost::format("heap %1% could not be added") % p));
            }

#if defined(HPX_DEBUG)
            ++heap_count_;
#endif
        }
Example #3
    inline thread_state_enum set_active_state(
        thread_id_type const& thrd, thread_state_enum newstate,
        thread_state_ex_enum newstate_ex, thread_priority priority,
        thread_state previous_state)
    {
        if (HPX_UNLIKELY(!thrd)) {
            HPX_THROW_EXCEPTION(null_thread_id,
                "threads::detail::set_active_state",
                "NULL thread id encountered");
            return terminated;
        }

        // make sure that the thread has not been suspended and set active again
        // in the meantime
        thread_state current_state = thrd->get_state();

        if (thread_state_enum(current_state) == thread_state_enum(previous_state) &&
            current_state != previous_state)
        {
            LTM_(warning)
                << "set_active_state: thread is still active, however "
                      "it was non-active since the original set_state "
                      "request was issued, aborting state change, thread("
                << thrd.get() << "), description("
                << thrd->get_description() << "), new state("
                << get_thread_state_name(newstate) << ")";
            return terminated;
        }

        // just retry, set_state will create new thread if target is still active
        error_code ec(lightweight);      // do not throw
        detail::set_thread_state(thrd, newstate, newstate_ex, priority, std::size_t(-1), ec);
        return terminated;
    }
Example #4
            void set_locked(typename mutex_type::scoped_lock& l)
            {
                BOOST_ASSERT(l.owns_lock());

                // swap the list
                queue_type queue;
                queue.swap(queue_);
                l.unlock();

                // release the threads
                while (!queue.empty())
                {
                    threads::thread_id_type id = queue.front().id_;
                    if (HPX_UNLIKELY(!id))
                    {
                        HPX_THROW_EXCEPTION(null_thread_id,
                            "lcos::event::set_locked",
                            "NULL thread id encountered");
                    }
                    queue.front().id_ = threads::invalid_thread_id;
                    queue.pop_front();
                    threads::set_thread_lco_description(id);
                    threads::set_thread_state(id, threads::pending);
                }
            }
Example #5
response component_namespace::bind_prefix(
    request const& req
  , error_code& ec
    )
{ // {{{ bind_prefix implementation
    // parameters
    std::string key = req.get_name();
    boost::uint32_t prefix = req.get_locality_id();

    mutex_type::scoped_lock l(mutex_);

    component_id_table_type::left_map::iterator cit = component_ids_.left.find(key)
                                    , cend = component_ids_.left.end();

    // This is the first request, so we use the type counter, and then
    // increment it.
    if (component_ids_.left.find(key) == cend)
    {
        if (HPX_UNLIKELY(!util::insert_checked(component_ids_.left.insert(
                std::make_pair(key, type_counter)), cit)))
        {
            l.unlock();

            HPX_THROWS_IF(ec, lock_error
              , "component_namespace::bind_prefix"
              , "component id table insertion failed due to a locking "
                "error or memory corruption")
            return response();
        }

        // If the insertion succeeded, we need to increment the type
        // counter.
        ++type_counter;
    }
Example #6
        ///////////////////////////////////////////////////////////////////////
        // create a new thread and schedule it if the initial state is equal to
        // pending
        thread_id_type create_thread(thread_init_data& data,
            thread_state_enum initial_state, bool run_now,
            std::size_t num_thread, error_code& ec)
        {
            if (run_now) {
                mutex_type::scoped_lock lk(mtx_);

                HPX_STD_UNIQUE_PTR<threads::thread_data> thrd (
                    new (memory_pool_) threads::thread_data(
                        data, memory_pool_, initial_state));

                // add a new entry in the map for this thread
                thread_id_type id = thrd->get_thread_id();
                std::pair<thread_map_type::iterator, bool> p =
                    thread_map_.insert(id, thrd.get());

                if (HPX_UNLIKELY(!p.second)) {
                    HPX_THROWS_IF(ec, hpx::out_of_memory,
                        "threadmanager::register_thread",
                        "Couldn't add new thread to the map of threads");
                    return invalid_thread_id;
                }

                // push the new thread into the pending queue
                if (initial_state == pending)
                    schedule_thread(thrd.get(), num_thread);

                // this thread has to be in the map now
                BOOST_ASSERT(thread_map_.find(id) != thread_map_.end());
                BOOST_ASSERT(thrd->is_created_from(&memory_pool_));

                do_some_work();       // try to execute the new work item
                thrd.release();       // release ownership to the map

                if (&ec != &throws)
                    ec = make_success_code();

                // return the thread_id of the newly created thread
                return id;
            }

            // do not execute the work, but register a task description for
            // later thread creation
#if HPX_THREAD_MAINTAIN_QUEUE_WAITTIME
            new_tasks_.enqueue(new task_description(
                boost::move(data), initial_state,
                util::high_resolution_clock::now()
            ));
#else
            new_tasks_.enqueue(new task_description(
                boost::move(data), initial_state));
#endif
            ++new_tasks_count_;

            if (&ec != &throws)
                ec = make_success_code();

            return invalid_thread_id;     // thread has not been created yet
        }
Example #7
    thread_state_enum at_timer(SchedulingPolicy& scheduler,
        boost::chrono::steady_clock::time_point& abs_time,
        thread_id_type const& thrd, thread_state_enum newstate,
        thread_state_ex_enum newstate_ex, thread_priority priority)
    {
        if (HPX_UNLIKELY(!thrd)) {
            HPX_THROW_EXCEPTION(null_thread_id,
                "threads::detail::at_timer",
                "NULL thread id encountered");
            return terminated;
        }

        // create a new thread in suspended state, which will execute the
        // requested set_state when timer fires and will re-awaken this thread,
        // allowing the deadline_timer to go out of scope gracefully
        thread_id_type self_id = get_self_id();

        boost::shared_ptr<boost::atomic<bool> > triggered(
            boost::make_shared<boost::atomic<bool> >(false));

        thread_init_data data(
            boost::bind(&wake_timer_thread,
                thrd, newstate, newstate_ex, priority,
                self_id, triggered),
            "wake_timer", 0, priority);

        thread_id_type wake_id = invalid_thread_id;
        create_thread(&scheduler, data, wake_id, suspended);

        // create timer firing in correspondence with given time
        typedef boost::asio::basic_deadline_timer<
            boost::chrono::steady_clock
          , util::chrono_traits<boost::chrono::steady_clock>
        > deadline_timer;

        deadline_timer t (
            get_thread_pool("timer-pool")->get_io_service(), abs_time);

        // let the timer invoke the set_state on the new (suspended) thread
        t.async_wait(boost::bind(&detail::set_thread_state,
            wake_id, pending, wait_timeout, priority,
            std::size_t(-1), boost::ref(throws)));

        // this waits for the thread to be reactivated when the timer fired
        // if it returns signaled the timer has been canceled, otherwise
        // the timer fired and the wake_timer_thread above has been executed
        bool oldvalue = false;
        thread_state_ex_enum statex = get_self().yield(suspended);

        if (wait_timeout != statex &&
            triggered->compare_exchange_strong(oldvalue, true)) //-V601
        {
            // wake_timer_thread has not been executed yet, cancel timer
            t.cancel();
        }

        return terminated;
    }
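
Read together with Example #1, the shared `triggered` flag is what resolves the race between the timer firing and the request being canceled: whichever side wins the compare-exchange owns the follow-up action, and the loser does nothing. A minimal sketch of that handshake, condensed from the two functions above (not a drop-in replacement):

    // shared between at_timer and wake_timer_thread
    boost::shared_ptr<boost::atomic<bool> > triggered(
        boost::make_shared<boost::atomic<bool> >(false));

    // wake_timer_thread side: the deadline_timer fired
    bool expected = false;
    if (triggered->compare_exchange_strong(expected, true))
        detail::set_thread_state(thrd, newstate, newstate_ex, priority);
    // else: at_timer already canceled the request, nothing to do

    // at_timer side: woken before the timeout, i.e. the request was canceled
    expected = false;
    if (triggered->compare_exchange_strong(expected, true))
        t.cancel();    // wake_timer_thread has not run yet, stop the timer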
Example #8
 thread_self& get_self()
 {
     thread_self* p = get_self_ptr();
     if (HPX_UNLIKELY(!p)) {
         HPX_THROW_EXCEPTION(null_thread_id, "threads::get_self",
             "NULL thread id encountered (is this executed on a HPX-thread?)");
     }
     return *p;
 }
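
A hedged usage sketch, mirroring how at_timer in Example #7 uses the returned reference (only valid when executed on an HPX thread):

    // suspend the current HPX thread and inspect the reason it was woken up
    threads::thread_state_ex_enum statex =
        threads::get_self().yield(threads::suspended);
    if (statex == threads::wait_timeout)
    {
        // woken by a timer rather than by an explicit set_state
    }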
Example #9
        /// This is a function which gets called periodically by the thread
        /// manager to allow for maintenance tasks to be executed in the
        /// scheduler. Returns true if the OS thread calling this function
        /// has to be terminated (i.e. no more work has to be done).
        virtual bool wait_or_add_new(std::size_t num_thread, bool running,
            std::int64_t& idle_loop_count)
        {
            std::size_t queues_size = this->queues_.size();
            HPX_ASSERT(num_thread < queues_size);

            std::size_t added = 0;
            bool result = true;

            result = this->queues_[num_thread]->wait_or_add_new(running,
                idle_loop_count, added) && result;
            if (0 != added) return result;

#ifdef HPX_HAVE_THREAD_MINIMAL_DEADLOCK_DETECTION
            // no new work is available, are we deadlocked?
            if (HPX_UNLIKELY(minimal_deadlock_detection && LHPX_ENABLED(error)))
            {
                bool suspended_only = true;

                for (std::size_t i = 0;
                     suspended_only && i != queues_size; ++i)
                {
                    suspended_only = this->queues_[i]->dump_suspended_threads(
                        i, idle_loop_count, running);
                }

                if (HPX_UNLIKELY(suspended_only)) {
                    if (running) {
                        LTM_(error) //-V128
                            << "queue(" << num_thread << "): "
                            << "no new work available, are we deadlocked?";
                    }
                    else {
                        LHPX_CONSOLE_(hpx::util::logging::level::error) //-V128
                              << "  [TM] queue(" << num_thread << "): "
                              << "no new work available, are we deadlocked?\n";
                    }
                }
            }
#endif

            return result;
        }
Example #10
    thread_self* get_self_ptr_checked(error_code& ec)
    {
        thread_self* p = thread_self::impl_type::get_self();

        if (HPX_UNLIKELY(!p))
        {
            HPX_THROWS_IF(ec, null_thread_id, "threads::get_self_ptr_checked",
                "NULL thread id encountered (is this executed on a HPX-thread?)");
            return 0;
        }

        if (&ec != &throws)
            ec = make_success_code();

        return p;
    }
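
The `&ec != &throws` check implements HPX's dual error-reporting convention: a caller that passes the global `throws` object gets exceptions, while a caller that passes its own `error_code` gets the error reported through it. A brief sketch of the non-throwing call path, assuming the surrounding `hpx::threads` context:

    // non-throwing variant: errors are reported through ec instead of thrown
    error_code ec(lightweight);
    thread_self* self = get_self_ptr_checked(ec);
    if (ec || 0 == self)
    {
        // not running on an HPX thread; handle this gracefully
    }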
Example #11
response component_namespace::bind_name(
    request const& req
  , error_code& ec
    )
{ // {{{ bind_name implementation
    // parameters
    std::string key = req.get_name();

    std::unique_lock<mutex_type> l(mutex_);

    component_id_table_type::left_map::iterator it = component_ids_.left.find(key)
                                    , end = component_ids_.left.end();

    // If the name is not in the table, register it (this is only done so
    // we can implement a backwards compatible get_component_id).
    if (it == end)
    {
        if (HPX_UNLIKELY(!util::insert_checked(component_ids_.left.insert(
                std::make_pair(key, type_counter)), it)))
        {
            l.unlock();

            HPX_THROWS_IF(ec, lock_error
              , "component_namespace::bind_name"
              , "component id table insertion failed due to a locking "
                "error or memory corruption");
            return response();
        }

        // If the insertion succeeded, we need to increment the type
        // counter.
        ++type_counter;
    }

    LAGAS_(info) << (boost::format(
        "component_namespace::bind_name, key(%1%), ctype(%2%)")
        % key % it->second);

    if (&ec != &throws)
        ec = make_success_code();

    return response(component_ns_bind_name, it->second);
} // }}}
Example #12
    bool statistics_counter<Statistic>::ensure_base_counter()
    {
        // lock here to avoid checking out multiple reference counted GIDs
        // from AGAS.
        std::unique_lock<mutex_type> l(mtx_);

        if (!base_counter_id_) {
            // get or create the base counter
            error_code ec(lightweight);
            hpx::id_type base_counter_id;
            {
                // We need to unlock the lock here since get_counter might suspend
                util::unlock_guard<std::unique_lock<mutex_type> > unlock(l);
                base_counter_id = get_counter(base_counter_name_, ec);
            }
            // After reacquiring the lock, we need to check again if base_counter_id_
            // hasn't been set yet
            if(!base_counter_id_)
            {
                base_counter_id_ = base_counter_id;
            }
            else
            {
                // If it was set already by a different thread, return true.
                return true;
            }

            if (HPX_UNLIKELY(ec || !base_counter_id_))
            {
                // base counter could not be retrieved
                HPX_THROW_EXCEPTION(bad_parameter,
                    "statistics_counter<Statistic>::evaluate_base_counter",
                    boost::str(boost::format(
                        "could not get or create performance counter: '%s'") %
                            base_counter_name_)
                    );
                return false;
            }
        }

        return true;
    }
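
The unlock/re-check sequence above is a common HPX idiom for calling a potentially suspending operation while a lock is held; a condensed sketch of just that idiom, using the member names from the function above:

    std::unique_lock<mutex_type> l(mtx_);
    if (!base_counter_id_) {
        hpx::id_type id;
        {
            // temporarily release l; it is reacquired when ul goes out of scope
            util::unlock_guard<std::unique_lock<mutex_type> > ul(l);
            id = get_counter(base_counter_name_, ec);   // may suspend this thread
        }
        if (!base_counter_id_)      // re-check: another thread may have set it
            base_counter_id_ = id;
    }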
Example #13
        void unlock(error_code& ec = throws)
        {
            HPX_ASSERT(threads::get_self_ptr() != 0);

            HPX_ITT_SYNC_RELEASING(this);
            mutex_type::scoped_lock l(mtx_);

            threads::thread_id_repr_type self_id = threads::get_self_id().get();
            if (HPX_UNLIKELY(owner_id_ != self_id))
            {
                HPX_THROWS_IF(ec, lock_error,
                    "mutex::unlock",
                    "The calling thread does not own the mutex");
                return;
            }

            util::unregister_lock(this);
            HPX_ITT_SYNC_RELEASED(this);
            owner_id_ = threads::invalid_thread_id_repr;

            cond_.notify_one(l, ec);
        }
Example #14
    thread_id_type set_thread_state_timed(SchedulingPolicy& scheduler,
        util::steady_time_point const& abs_time, thread_id_type const& thrd,
        thread_state_enum newstate, thread_state_ex_enum newstate_ex,
        thread_priority priority, std::size_t thread_num, error_code& ec)
    {
        if (HPX_UNLIKELY(!thrd)) {
            HPX_THROWS_IF(ec, null_thread_id,
                "threads::detail::set_thread_state",
                "NULL thread id encountered");
            return 0;
        }

        // this creates a new thread which creates the timer and handles the
        // requested actions
        thread_init_data data(
            boost::bind(&at_timer<SchedulingPolicy>,
                boost::ref(scheduler), abs_time.value(), thrd, newstate, newstate_ex,
                priority),
            "at_timer (expire at)", 0, priority, thread_num);

        thread_id_type newid = invalid_thread_id;
        create_thread(&scheduler, data, newid, pending, true, ec); //-V601
        return newid;
    }
Example #15
    void* one_size_heap_list::alloc(std::size_t count)
    {
        unique_lock_type guard(mtx_);

        if (HPX_UNLIKELY(0 == count))
        {
            guard.unlock();
            HPX_THROW_EXCEPTION(bad_parameter,
                name() + "::alloc",
                "cannot allocate 0 objects");
        }

        //std::size_t size = 0;
        void* p = nullptr;
        {
            if (!heap_list_.empty())
            {
                //size = heap_list_.size();
                for (auto & heap : heap_list_)
                {
                    bool allocated = false;

                    {
                        util::unlock_guard<unique_lock_type> ul(guard);
                        allocated = heap->alloc(&p, count);
                    }

                    if (allocated)
                    {
#if defined(HPX_DEBUG)
                        // Allocation succeeded, update statistics.
                        alloc_count_ += count;
                        if (alloc_count_ - free_count_ > max_alloc_count_)
                            max_alloc_count_ = alloc_count_- free_count_;
#endif
                        return p;
                    }

#if defined(HPX_DEBUG)
                    LOSH_(info) << hpx::util::format(
                        "{1}::alloc: failed to allocate from heap[{2}] "
                        "(heap[{2}] has allocated {3} objects and has "
                        "space for {4} more objects)",
                        name(),
                        heap->heap_count(),
                        heap->size(),
                        heap->free_size());
#endif
                }
            }
        }

        // Create new heap.
        bool did_create = false;
        {
#if defined(HPX_DEBUG)
            heap_list_.push_front(create_heap_(
                class_name_.c_str(), heap_count_ + 1, parameters_));
#else
            heap_list_.push_front(create_heap_(
                class_name_.c_str(), 0, parameters_));
#endif

            iterator itnew = heap_list_.begin();
            typename list_type::value_type heap = *itnew;
            bool result = false;

            {
                util::unlock_guard<unique_lock_type> ul(guard);
                result = heap->alloc((void**)&p, count);
            }

            if (HPX_UNLIKELY(!result || nullptr == p))
            {
                // out of memory
                guard.unlock();
                HPX_THROW_EXCEPTION(out_of_memory,
                    name() + "::alloc",
                    hpx::util::format(
                        "new heap failed to allocate {1} objects",
                        count));
            }

#if defined(HPX_DEBUG)
            alloc_count_ += count;
            ++heap_count_;

            LOSH_(info) << hpx::util::format(
                "{1}::alloc: creating new heap[{2}], size is now {3}",
                name(),
                heap_count_,
                heap_list_.size());
#endif
            did_create = true;
        }

        if (did_create)
            return p;

        guard.unlock();

        // Try again, we just got a new heap, so we should be good.
        return alloc(count);
    }
Example #16
// remote call to AGAS
void register_worker(registration_header const& header)
{
    // This lock acquires the big_boot_barrier mutex on creation. When it goes
    // out of scope, its dtor calls big_boot_barrier::notify().
    big_boot_barrier::scoped_lock lock(get_big_boot_barrier());

    naming::resolver_client& agas_client = get_runtime().get_agas_client();

    if (HPX_UNLIKELY(agas_client.is_connecting()))
    {
        HPX_THROW_EXCEPTION(internal_server_error
            , "agas::register_worker"
            , "runtime_mode_connect can't find running application.");
    }

    if (HPX_UNLIKELY(!agas_client.is_bootstrap()))
    {
        HPX_THROW_EXCEPTION(internal_server_error
            , "agas::register_worker"
            , "registration parcel received by non-bootstrap locality.");
    }

    naming::gid_type prefix
                   , parcel_lower, parcel_upper
                   , heap_lower, heap_upper;

    agas_client.register_locality(header.locality, prefix);

    agas_client.get_id_range(header.locality, header.parcelport_allocation
                           , parcel_lower, parcel_upper);
    agas_client.get_id_range(header.locality, header.response_allocation
                           , heap_lower, heap_upper);

    naming::gid_type runtime_support_gid(prefix.get_msb()
                                       , header.component_runtime_support_ptr)
                   , memory_gid(prefix.get_msb()
                              , header.component_memory_ptr);

    naming::address runtime_support_address
        (header.locality,
         components::get_component_type<components::server::runtime_support>(),
         header.component_runtime_support_ptr);

    naming::address memory_address
        (header.locality,
         components::get_component_type<components::server::memory>(),
         header.component_memory_ptr);

    agas_client.bind(runtime_support_gid, runtime_support_address);
    agas_client.bind(memory_gid, memory_address);

    agas_client.bind_range(heap_lower, header.response_allocation
                         , header.response_heap_address
                         , header.response_heap_offset);

    naming::address primary_addr(get_runtime().here(),
        server::primary_namespace::get_component_type(),
            static_cast<void*>(&agas_client.bootstrap->primary_ns_server));
    naming::address component_addr(get_runtime().here(),
        server::component_namespace::get_component_type(),
            static_cast<void*>(&agas_client.bootstrap->component_ns_server));
    naming::address symbol_addr(get_runtime().here(),
        server::symbol_namespace::get_component_type(),
            static_cast<void*>(&agas_client.bootstrap->symbol_ns_server));

    actions::base_action* p =
        new actions::transfer_action<notify_console_action>(
            notification_header(
                prefix, header.response_heap_address, header.response_heap_ptr,
                heap_lower, heap_upper, parcel_lower, parcel_upper,
                primary_addr, component_addr, symbol_addr));

    // FIXME: This could screw with startup.

    // TODO: Handle cases where localities try to connect to AGAS while it's
    // shutting down.
    if (agas_client.status() != starting)
    {
        // We can just send the parcel now, the connecting locality isn't a part
        // of startup synchronization.
        get_big_boot_barrier().apply(naming::get_locality_id_from_gid(prefix)
                                   , naming::address(header.locality), p);
    }

    else // AGAS is starting up; this locality is participating in startup
    {    // synchronization.
        HPX_STD_FUNCTION<void()>* thunk = new HPX_STD_FUNCTION<void()>
            (boost::bind(&big_boot_barrier::apply
                       , boost::ref(get_big_boot_barrier())
                       , naming::get_locality_id_from_gid(prefix)
                       , naming::address(header.locality)
                       , p));

        get_big_boot_barrier().add_thunk(thunk);
    }
}
Example #17
        /// This is a function which gets called periodically by the thread
        /// manager to allow for maintenance tasks to be executed in the
        /// scheduler. Returns true if the OS thread calling this function
        /// has to be terminated (i.e. no more work has to be done).
        virtual bool wait_or_add_new(std::size_t num_thread, bool running,
            std::int64_t& idle_loop_count)
        {
            std::size_t added = 0;
            bool result = true;

            std::size_t high_priority_queues = high_priority_queues_.size();
            thread_queue_type* this_high_priority_queue = nullptr;
            thread_queue_type* this_queue = queues_[num_thread];

            if (num_thread < high_priority_queues)
            {
                this_high_priority_queue = high_priority_queues_[num_thread];
                result = this_high_priority_queue->wait_or_add_new(running,
                            idle_loop_count, added)
                        && result;
                if (0 != added) return result;
            }

            result = this_queue->wait_or_add_new(
                running, idle_loop_count, added) && result;
            if (0 != added) return result;

            for (std::size_t idx: victim_threads_[num_thread])
            {
                HPX_ASSERT(idx != num_thread);

                if (idx < high_priority_queues &&
                    num_thread < high_priority_queues)
                {
                    thread_queue_type* q =  high_priority_queues_[idx];
                    result = this_high_priority_queue->
                        wait_or_add_new(running, idle_loop_count,
                            added, q)
                      && result;

                    if (0 != added)
                    {
                        q->increment_num_stolen_from_staged(added);
                        this_high_priority_queue->
                            increment_num_stolen_to_staged(added);
                        return result;
                    }
                }

                result = this_queue->wait_or_add_new(running,
                    idle_loop_count, added, queues_[idx]) && result;
                if (0 != added)
                {
                    queues_[idx]->increment_num_stolen_from_staged(added);
                    this_queue->increment_num_stolen_to_staged(added);
                    return result;
                }
            }

#ifdef HPX_HAVE_THREAD_MINIMAL_DEADLOCK_DETECTION
            // no new work is available, are we deadlocked?
            if (HPX_UNLIKELY(minimal_deadlock_detection && LHPX_ENABLED(error)))
            {
                bool suspended_only = true;

                for (std::size_t i = 0; suspended_only && i != queues_.size(); ++i) {
                    suspended_only = queues_[i]->dump_suspended_threads(
                        i, idle_loop_count, running);
                }

                if (HPX_UNLIKELY(suspended_only)) {
                    if (running) {
                        LTM_(error) //-V128
                            << "queue(" << num_thread << "): "
                            << "no new work available, are we deadlocked?";
                    }
                    else {
                        LHPX_CONSOLE_(hpx::util::logging::level::error) //-V128
                              << "  [TM] " //-V128
                              << "queue(" << num_thread << "): "
                              << "no new work available, are we deadlocked?\n";
                    }
                }
            }
#endif

            result = low_priority_queue_.wait_or_add_new(running,
                idle_loop_count, added) && result;
            if (0 != added) return result;

            return result;
        }
Example #18
        // operations
        value_type* alloc(std::size_t count = 1)
        {
            if (HPX_UNLIKELY(0 == count))
            {
                HPX_THROW_EXCEPTION(bad_parameter,
                    name() + "::alloc",
                    "cannot allocate 0 objects");
            }

            std::size_t size = 0;
            value_type* p = NULL;
            {
                shared_lock_type guard(mtx_);

                size = heap_list_.size();
                if (size)
                {
                    for (iterator it = heap_list_.begin(); it != heap_list_.end(); ++it)
                    {
                        if ((*it)->alloc(&p, count))
                        {
                            // Allocation succeeded, update statistics.
                            alloc_count_ += count;

                            if (alloc_count_ - free_count_ > max_alloc_count_)
                                max_alloc_count_ = alloc_count_- free_count_;

                            return p;
                        }

                        LOSH_(info)
                            << (boost::format(
                                "%1%::alloc: failed to allocate from heap[%2%] "
                                "(heap[%2%] has allocated %3% objects and has "
                                "space for %4% more objects)")
                                % name()
                                % (*it)->heap_count_
                                % (*it)->size()
                                % (*it)->free_size());
                    }
                }
            }

            // Create new heap.
            bool did_create = false;
            {
                // Acquire exclusive access.
                unique_lock_type ul(mtx_);

                iterator itnew = heap_list_.insert(heap_list_.begin(),
                    typename list_type::value_type(new heap_type
                        (class_name_.c_str(), heap_count_ + 1, heap_step)));

                if (HPX_UNLIKELY(itnew == heap_list_.end()))
                {
                    HPX_THROW_EXCEPTION(out_of_memory,
                        name() + "::alloc",
                        "new heap could not be added");
                }

                bool result = (*itnew)->alloc(&p, count);

                if (HPX_UNLIKELY(!result || NULL == p))
                {
                    // out of memory
                    HPX_THROW_EXCEPTION(out_of_memory,
                        name() + "::alloc",
                        boost::str(boost::format(
                            "new heap failed to allocate %1% objects")
                            % count));
                }

                alloc_count_ += count;
                ++heap_count_;

                LOSH_(info)
                    << (boost::format(
                        "%1%::alloc: creating new heap[%2%], size is now %3%")
                        % name()
                        % heap_count_
                        % heap_list_.size());
                did_create = true;
            }

            if (did_create)
                return p;

            // Try again, we just got a new heap, so we should be good.
            return alloc(count);
        }
Example #19
    // schedule threads based on given parcel
    void applier::schedule_action(parcelset::parcel const& p)
    {
        // decode the action-type in the parcel
        actions::continuation_type cont = p.get_continuation();
        actions::action_type act = p.get_action();

#if defined(HPX_HAVE_SECURITY)
        // we look up the certificate of the originating locality, no matter
        // whether this parcel was routed through another locality or not
        boost::uint32_t locality_id =
            naming::get_locality_id_from_gid(p.get_parcel_id());
        error_code ec(lightweight);
        components::security::signed_certificate const& cert =
            get_locality_certificate(locality_id, ec);

        if (verify_capabilities_ && ec) {
            // we should have received the sender's certificate by now
            HPX_THROW_EXCEPTION(security_error,
                "applier::schedule_action",
                boost::str(boost::format("couldn't extract sender's "
                    "certificate (sender locality id: %1%)") % locality_id));
            return;
        }

        components::security::capability caps_sender;
        if (verify_capabilities_)
            caps_sender = cert.get_type().get_capability();
#endif
        int comptype = act->get_component_type();
        naming::locality dest = p.get_destination_locality();

        // fetch the set of destinations
        std::size_t size = p.size();
        naming::id_type const* ids = p.get_destinations();
        naming::address const* addrs = p.get_destination_addrs();

        // if the parcel carries a continuation it should be directed to a
        // single destination
        HPX_ASSERT(!cont || size == 1);

        // schedule a thread for each of the destinations
        for (std::size_t i = 0; i != size; ++i)
        {
            naming::address const& addr = addrs[i];

            // make sure this parcel destination matches the proper locality
            HPX_ASSERT(dest == addr.locality_);

            // decode the local virtual address of the parcel
            naming::address::address_type lva = addr.address_;

            // by convention, a zero address references the local runtime
            // support component
            if (0 == lva)
            {
                lva = get_runtime_support_raw_gid().get_lsb();
            }
            else if (comptype == components::component_memory)
            {
                HPX_ASSERT(naming::refers_to_virtual_memory(ids[i].get_gid()));
                lva = get_memory_raw_gid().get_lsb();
            }

#if defined(HPX_HAVE_SECURITY)
            if (verify_capabilities_) {
                components::security::capability caps_action =
                    act->get_required_capabilities(lva);

                if (caps_action.verify(caps_sender) == false) {
                    HPX_THROW_EXCEPTION(security_error,
                        "applier::schedule_action",
                        boost::str(boost::format("sender has insufficient capabilities "
                            "to execute the action (%1%, sender: %2%, action %3%)") %
                            act->get_action_name() % caps_sender % caps_action));
                    return;
                }
            }
#endif
            // make sure the component_type of the action matches the
            // component type in the destination address
            if (HPX_UNLIKELY(!components::types_are_compatible(
                addr.type_, comptype)))
            {
                hpx::util::osstream strm;
                strm << " types are not compatible: destination_type("
                      << addr.type_ << ") action_type(" << comptype
                      << ") parcel ("  << p << ")";
                HPX_THROW_EXCEPTION(bad_component_type,
                    "action_manager::fetch_parcel",
                    hpx::util::osstream_get_string(strm));
            }

            // dispatch action, register work item either with or without
            // continuation support
            if (!cont) {
                // No continuation is to be executed, register the plain
                // action and the local-virtual address.
                act->schedule_thread(ids[i], lva, threads::pending);
            }
            else {
                // This parcel carries a continuation, register a wrapper
                // which first executes the original thread function as
                // required by the action and triggers the continuations
                // afterwards.
                act->schedule_thread(cont, ids[i], lva, threads::pending);
            }
        }
    }
Example #20
    inline thread_state set_thread_state(
        thread_id_type const& thrd, thread_state_enum new_state,
        thread_state_ex_enum new_state_ex, thread_priority priority,
        std::size_t thread_num, error_code& ec)
    {
        if (HPX_UNLIKELY(!thrd)) {
            HPX_THROWS_IF(ec, null_thread_id, "threads::detail::set_thread_state",
                "NULL thread id encountered");
            return thread_state(unknown);
        }

        // set_state can't be used to force a thread into active state
        if (new_state == threads::active) {
            std::ostringstream strm;
            strm << "invalid new state: " << get_thread_state_name(new_state);
            HPX_THROWS_IF(ec, bad_parameter, "threads::detail::set_thread_state",
                strm.str());
            return thread_state(unknown);
        }

        // we know that the id is actually the pointer to the thread
        if (!thrd) {
            if (&ec != &throws)
                ec = make_success_code();
            return thread_state(terminated);     // this thread has already been terminated
        }

        thread_state previous_state;
        do {
            // action depends on the current state
            previous_state = thrd->get_state();
            thread_state_enum previous_state_val = previous_state;

            // nothing to do here if the state doesn't change
            if (new_state == previous_state_val) {
                LTM_(warning)
                    << "set_thread_state: old thread state is the same as new "
                       "thread state, aborting state change, thread("
                    << thrd.get() << "), description("
                    << thrd->get_description() << "), new state("
                    << get_thread_state_name(new_state) << ")";

                if (&ec != &throws)
                    ec = make_success_code();

                return thread_state(new_state);
            }

            // the thread to set the state for is currently running, so we
            // schedule another thread to execute the pending set_state
            switch (previous_state_val) {
            case active:
                {
                    // schedule a new thread to set the state
                    LTM_(warning)
                        << "set_thread_state: thread is currently active, scheduling "
                            "new thread, thread(" << thrd.get() << "), description("
                        << thrd->get_description() << "), new state("
                        << get_thread_state_name(new_state) << ")";

                    thread_init_data data(
                        boost::bind(&set_active_state,
                            thrd, new_state, new_state_ex,
                            priority, previous_state),
                        "set state for active thread", 0, priority);

                    create_work(thrd->get_scheduler_base(), data, pending, ec);

                    if (&ec != &throws)
                        ec = make_success_code();

                    return previous_state;     // done
                }
                break;
            case terminated:
                {
                    LTM_(warning)
                        << "set_thread_state: thread is terminated, aborting state "
                            "change, thread(" << thrd.get() << "), description("
                        << thrd->get_description() << "), new state("
                        << get_thread_state_name(new_state) << ")";

                    if (&ec != &throws)
                        ec = make_success_code();

                    // If the thread has been terminated while this set_state was
                    // pending nothing has to be done anymore.
                    return previous_state;
                }
                break;
            case pending:
                if (suspended == new_state) {
                    // we do not allow explicit resetting of a state to suspended
                    // without the thread being executed.
                    std::ostringstream strm;
                    strm << "set_thread_state: invalid new state, can't demote a "
                            "pending thread, "
                         << "thread(" << thrd.get() << "), description("
                         << thrd->get_description() << "), new state("
                         << get_thread_state_name(new_state) << ")";

                    LTM_(fatal) << strm.str();

                    HPX_THROWS_IF(ec, bad_parameter,
                        "threads::detail::set_thread_state",
                        strm.str());
                    return thread_state(unknown);
                }
                break;
            case suspended:
                break;      // fine, just set the new state
            default:
                HPX_ASSERT(false);    // should not happen
                break;
            }

            // If the previous state was pending we are supposed to remove the
            // thread from the queue. But in order to avoid linearly looking
            // through the queue we defer this to the thread function, which
            // at some point will ignore this thread by simply skipping it
            // (if it's not pending anymore).

            LTM_(info) << "set_thread_state: thread(" << thrd.get() << "), "
                          "description(" << thrd->get_description() << "), "
                          "new state(" << get_thread_state_name(new_state) << "), "
                          "old state(" << get_thread_state_name(previous_state_val)
                       << ")";

            // So all we do here is set the new state.
            if (thrd->restore_state(new_state, previous_state)) {
                thrd->set_state_ex(new_state_ex);
                break;
            }

            // state has changed since we fetched it from the thread, retry
            LTM_(error)
                << "set_thread_state: state has been changed since it was fetched, "
                   "retrying, thread(" << thrd.get() << "), "
                   "description(" << thrd->get_description() << "), "
                   "new state(" << get_thread_state_name(new_state) << "), "
                   "old state(" << get_thread_state_name(previous_state_val)
                << ")";
        } while (true);

        if (new_state == pending) {
            // REVIEW: Passing a specific target thread may interfere with the
            // round robin queuing.
            thrd->get_scheduler_base()->schedule_thread(thrd.get(), thread_num, priority);
            thrd->get_scheduler_base()->do_some_work(thread_num);
        }

        if (&ec != &throws)
            ec = make_success_code();

        return previous_state;
    }
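
The do/while above is essentially a compare-and-swap loop on the thread's state word; a stripped-down sketch of the idiom, assuming `restore_state` succeeds only if the state still equals the previously observed value:

    thread_state previous_state;
    do {
        previous_state = thrd->get_state();        // observe the current state
        // ... early-out handling of active/terminated/pending cases elided ...
    } while (!thrd->restore_state(new_state, previous_state));   // retry on change
    thrd->set_state_ex(new_state_ex);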
Example #21
        // operations
        void* alloc(std::size_t count = 1)
        {
            unique_lock_type guard(mtx_);

            if (HPX_UNLIKELY(0 == count))
            {
                HPX_THROW_EXCEPTION(bad_parameter,
                    name() + "::alloc",
                    "cannot allocate 0 objects");
            }

            //std::size_t size = 0;
            value_type* p = NULL;
            {
                if (!heap_list_.empty())
                {
                    //size = heap_list_.size();
                    for (iterator it = heap_list_.begin(); it != heap_list_.end(); ++it)
                    {
                        typename list_type::value_type heap = *it;
                        bool allocated = false;

                        {
                            util::scoped_unlock<unique_lock_type> ul(guard);
                            allocated = heap->alloc(&p, count);
                        }

                        if (allocated)
                        {
#if defined(HPX_DEBUG)
                            // Allocation succeeded, update statistics.
                            alloc_count_ += count;
                            if (alloc_count_ - free_count_ > max_alloc_count_)
                                max_alloc_count_ = alloc_count_- free_count_;
#endif
                            return p;
                        }

#if defined(HPX_DEBUG)
                        LOSH_(info)
                            << (boost::format(
                                "%1%::alloc: failed to allocate from heap[%2%] "
                                "(heap[%2%] has allocated %3% objects and has "
                                "space for %4% more objects)")
                                % name()
                                % (*it)->heap_count_
                                % (*it)->size()
                                % (*it)->free_size());
#endif
                    }
                }
            }

            // Create new heap.
            bool did_create = false;
            {
#if defined(HPX_DEBUG)
                heap_list_.push_front(typename list_type::value_type(
                    new heap_type(class_name_.c_str(), heap_count_ + 1, heap_step)));
#else
                heap_list_.push_front(typename list_type::value_type(
                    new heap_type(class_name_.c_str(), 0, heap_step)));
#endif

                iterator itnew = heap_list_.begin();
                typename list_type::value_type heap = *itnew;
                bool result = false;

                {
                    util::scoped_unlock<unique_lock_type> ul(guard);
                    result = heap->alloc(&p, count);
                }

                if (HPX_UNLIKELY(!result || NULL == p))
                {
                    // out of memory
                    HPX_THROW_EXCEPTION(out_of_memory,
                        name() + "::alloc",
                        boost::str(boost::format(
                            "new heap failed to allocate %1% objects")
                            % count));
                }

#if defined(HPX_DEBUG)
                alloc_count_ += count;
                ++heap_count_;

                LOSH_(info)
                    << (boost::format(
                        "%1%::alloc: creating new heap[%2%], size is now %3%")
                        % name()
                        % heap_count_
                        % heap_list_.size());
#endif
                did_create = true;
            }

            if (did_create)
                return p;

            guard.unlock();

            // Try again, we just got a new heap, so we should be good.
            return alloc(count);
        }
Example #22
 ~object_semaphore()
 { // {{{
     if (HPX_UNLIKELY(!thread_queue_.empty()))
         abort_pending(deadlock);
 } // }}}
Example #23
    ///////////////////////////////////////////////////////////////////////////
    // Call-back function for parcelHandler to call when new parcels are received
    void action_manager::fetch_parcel(
        parcelset::parcelhandler& parcel_handler,
        naming::gid_type const& parcel_id)
    {
        parcelset::parcel p;
        if (!parcel_handler.get_parcel(p, parcel_id))
            return;

        while (threads::threadmanager_is(starting))
        {
            boost::this_thread::sleep(boost::get_system_time() +
                boost::posix_time::milliseconds(HPX_NETWORK_RETRIES_SLEEP));
        }

        // Give up if we're shutting down.
        if (threads::threadmanager_is(stopping))
        {
            LPT_(debug) << "action_manager: fetch_parcel: dropping late "
                            "parcel " << p;
            return;
        }

        // write this parcel to the log
        LPT_(debug) << "action_manager: fetch_parcel: " << p;

        // decode the action-type in the parcel
        continuation_type cont = p.get_continuation();
        actions::action_type act = p.get_action();
        actions::base_action::action_type acttype = act->get_action_type();
        int comptype = act->get_component_type();
        naming::locality dest = p.get_destination_locality();

        // fetch the set of destinations
        std::vector<naming::address> const& addrs = p.get_destination_addrs();

        // if the parcel carries a continuation it should be directed to a
        // single destination
        BOOST_ASSERT(!cont || addrs.size() == 1);

        // schedule a thread for each of the destinations
        threads::threadmanager_base& tm = appl_.get_thread_manager();
        std::vector<naming::address>::const_iterator end = addrs.end();
        for (std::vector<naming::address>::const_iterator it = addrs.begin();
             it != end; ++it)
        {
            naming::address const& addr = *it;

            // make sure this parcel destination matches the proper locality
            BOOST_ASSERT(dest == addr.locality_);

            // decode the local virtual address of the parcel
            naming::address::address_type lva = addr.address_;

            // by convention, a zero address references the local runtime
            // support component
            if (0 == lva)
                lva = appl_.get_runtime_support_raw_gid().get_lsb();

            // make sure the component_type of the action matches the
            // component type in the destination address
            if (HPX_UNLIKELY(!components::types_are_compatible(
                addr.type_, comptype)))
            {
                hpx::util::osstream strm;
                strm << " types are not compatible: destination_type("
                      << addr.type_ << ") action_type(" << comptype
                      << ") parcel ("  << p << ")";
                HPX_THROW_EXCEPTION(bad_component_type,
                    "action_manager::fetch_parcel",
                    hpx::util::osstream_get_string(strm));
            }

            // either directly execute the action or create a new thread
            if (actions::base_action::direct_action == acttype)
            {
                // direct execution of the action
                if (!cont) {
                    // No continuation is to be executed.
                    act->get_thread_function(lva)(threads::wait_signaled);
                }
                else {
                    // This parcel carries a continuation, we execute a wrapper
                    // handling all related functionality.
                    act->get_thread_function(cont, lva)(threads::wait_signaled);
                }
            }
            else {
                // dispatch action, register work item either with or without
                // continuation support
                if (!cont) {
                    // No continuation is to be executed, register the plain 
                    // action and the local-virtual address with the TM only.
                    threads::thread_init_data data;
                    tm.register_work(
                        act->get_thread_init_data(lva, data),
                        threads::pending);
                }
                else {
                    // This parcel carries a continuation, register a wrapper 
                    // which first executes the original thread function as 
                    // required by the action and triggers the continuations 
                    // afterwards.
                    threads::thread_init_data data;
                    tm.register_work(
                        act->get_thread_init_data(cont, lva, data),
                        threads::pending);
                }
            }
        }
    }
Example #24
    // schedule threads based on given parcel
    void applier::schedule_action(parcelset::parcel p, std::size_t num_thread)
    {
        // fetch the set of destinations
#if !defined(HPX_SUPPORT_MULTIPLE_PARCEL_DESTINATIONS)
        std::size_t const size = 1ul;
#else
        std::size_t const size = p.size();
#endif
        naming::id_type const* ids = p.destinations();
        naming::address const* addrs = p.addrs();

        // decode the action-type in the parcel
        std::unique_ptr<actions::continuation> cont = p.get_continuation();
        actions::base_action * act = p.get_action();

#if defined(HPX_HAVE_SECURITY)
        // we look up the certificate of the originating locality, no matter
        // whether this parcel was routed through another locality or not
        boost::uint32_t locality_id =
            naming::get_locality_id_from_gid(p.get_parcel_id());
        error_code ec(lightweight);
        components::security::signed_certificate const& cert =
            get_locality_certificate(locality_id, ec);

        if (verify_capabilities_ && ec) {
            // we should have received the sender's certificate by now
            HPX_THROW_EXCEPTION(security_error,
                "applier::schedule_action",
                boost::str(boost::format("couldn't extract sender's "
                    "certificate (sender locality id: %1%)") % locality_id));
            return;
        }

        components::security::capability caps_sender;
        if (verify_capabilities_)
            caps_sender = cert.get_type().get_capability();
#endif
        int comptype = act->get_component_type();
        naming::gid_type dest = p.destination_locality();

        // if the parcel carries a continuation it should be directed to a
        // single destination
        HPX_ASSERT(!cont || size == 1);

        naming::resolver_client& client = hpx::naming::get_agas_client();

        // schedule a thread for each of the destinations
        for (std::size_t i = 0; i != size; ++i)
        {
            naming::address const& addr = addrs[i];

            // make sure this parcel destination matches the proper locality
            HPX_ASSERT(dest == addr.locality_);

            // decode the local virtual address of the parcel
            naming::address::address_type lva = addr.address_;

            // by convention, a zero address references either the local
            // runtime support component or one of the AGAS components
            if (0 == lva)
            {
                switch(comptype)
                {
                case components::component_runtime_support:
                    lva = get_runtime_support_raw_gid().get_lsb();
                    break;

                case components::component_agas_primary_namespace:
                    lva = get_agas_client().get_primary_ns_lva();
                    break;

                case components::component_agas_symbol_namespace:
                    lva = get_agas_client().get_symbol_ns_lva();
                    break;

                case components::component_plain_function:
                    break;

                default:
                    HPX_ASSERT(false);
                }
            }
            else if (comptype == components::component_memory)
            {
                HPX_ASSERT(naming::refers_to_virtual_memory(ids[i].get_gid()));
                lva = get_memory_raw_gid().get_lsb();
            }

            // make sure the target has not been migrated away
            auto r = act->was_object_migrated(ids[i], lva);
            if (r.first)
            {
#if defined(HPX_SUPPORT_MULTIPLE_PARCEL_DESTINATIONS)
                // it's unclear at this point what could be done if there is
                // more than one destination
                HPX_ASSERT(size == 1);
#endif
                // set continuation in outgoing parcel
                if (cont)
                    p.set_continuation(std::move(cont));

                // route parcel to new locality of target
                client.route(
                    std::move(p),
                    util::bind(&detail::parcel_sent_handler,
                        boost::ref(parcel_handler_),
                        util::placeholders::_1, util::placeholders::_2),
                    threads::thread_priority_normal);
                break;
            }

#if defined(HPX_HAVE_SECURITY)
            if (verify_capabilities_) {
                components::security::capability caps_action =
                    act->get_required_capabilities(lva);

                if (caps_action.verify(caps_sender) == false) {
                    HPX_THROW_EXCEPTION(security_error,
                        "applier::schedule_action",
                        boost::str(boost::format("sender has insufficient capabilities "
                            "to execute the action (action: %1%, sender capabilities: %2%, "
                            "required capabilities: %3%)") %
                            act->get_action_name() % caps_sender % caps_action));
                    return;
                }
            }
#endif
            // make sure the component_type of the action matches the
            // component type in the destination address
            if (HPX_UNLIKELY(!components::types_are_compatible(
                addr.type_, comptype)))
            {
                std::ostringstream strm;
                strm << "types are not compatible: destination_type("
                    << addr.type_ << ") action_type(" << comptype
                    << ") parcel(" << p << ")";
                HPX_THROW_EXCEPTION(bad_component_type,
                    "applier::schedule_action",
                    strm.str());
            }

            // dispatch action, register work item either with or without
            // continuation support
            if (!cont) {
                // No continuation is to be executed, register the plain
                // action and the local-virtual address.
                act->schedule_thread(ids[i], lva, threads::pending, num_thread);
            }
            else {
                // This parcel carries a continuation, register a wrapper
                // which first executes the original thread function as
                // required by the action and triggers the continuations
                // afterwards.
                act->schedule_thread(std::move(cont), ids[i], lva,
                    threads::pending, num_thread);
            }
        }
    }
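    // A minimal sketch (assumed, simplified semantics) of the compatibility
    // predicate used above: the wildcard component_invalid is accepted for any
    // address, otherwise the action has to target exactly the component type
    // the resolved address refers to. The real HPX predicate may additionally
    // accept related (derived/base) component types.
    inline bool types_are_compatible_sketch(
        components::component_type addr_type,
        components::component_type action_type)
    {
        if (addr_type == components::component_invalid ||
            action_type == components::component_invalid)
        {
            return true;    // no way of telling, assume compatibility
        }
        return addr_type == action_type;
    }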
Exemplo n.º 25
0
        ///////////////////////////////////////////////////////////////////////
        // add new threads if there is some amount of work available
        std::size_t add_new(boost::int64_t add_count, thread_queue* addfrom,
            std::unique_lock<mutex_type> &lk, bool steal = false)
        {
            HPX_ASSERT(lk.owns_lock());

            if (HPX_UNLIKELY(0 == add_count))
                return 0;

            std::size_t added = 0;
            task_description* task = 0;
            while (add_count-- && addfrom->new_tasks_.pop(task, steal))
            {
#ifdef HPX_HAVE_THREAD_QUEUE_WAITTIME
                if (maintain_queue_wait_times) {
                    addfrom->new_tasks_wait_ +=
                        util::high_resolution_clock::now() - util::get<2>(*task);
                    ++addfrom->new_tasks_wait_count_;
                }
#endif
                --addfrom->new_tasks_count_;

                // measure thread creation time
                util::block_profiler_wrapper<add_new_tag> bp(add_new_logger_);

                // create the new thread
                threads::thread_init_data& data = util::get<0>(*task);
                thread_state_enum state = util::get<1>(*task);
                threads::thread_id_type thrd;

                create_thread_object(thrd, data, state, lk);

                delete task;

                // add the new entry to the map of all threads
                std::pair<thread_map_type::iterator, bool> p =
                    thread_map_.insert(thrd);

                if (HPX_UNLIKELY(!p.second)) {
                    HPX_THROW_EXCEPTION(hpx::out_of_memory,
                        "threadmanager::add_new",
                        "Couldn't add new thread to the thread map");
                    return 0;
                }
                ++thread_map_count_;

                // only insert the thread into the work-items queue if it is in
                // pending state
                if (state == pending) {
                    // pushing the new thread into the pending queue of the
                    // specified thread_queue
                    ++added;
                    schedule_thread(thrd.get());
                }

                // this thread has to be in the map now
                HPX_ASSERT(thread_map_.find(thrd.get()) != thread_map_.end());
                HPX_ASSERT(thrd->get_pool() == &memory_pool_);
            }

            if (added) {
                LTM_(debug) << "add_new: added " << added << " tasks to queues"; //-V128
            }
            return added;
        }
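        // A hedged sketch of the task_description layout implied by the
        // util::get<0..2>(*task) calls above; the actual typedef is defined
        // elsewhere in thread_queue, this one is purely illustrative.
#ifdef HPX_HAVE_THREAD_QUEUE_WAITTIME
        typedef util::tuple<
            threads::thread_init_data,   // get<0>: how to construct the thread
            thread_state_enum,           // get<1>: initial state, e.g. pending
            boost::uint64_t              // get<2>: enqueue time for wait statistics
        > task_description_sketch;
#else
        typedef util::tuple<
            threads::thread_init_data,   // get<0>: how to construct the thread
            thread_state_enum            // get<1>: initial state, e.g. pending
        > task_description_sketch;
#endif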
Exemplo n.º 26
0
response component_namespace::bind_prefix(
    request const& req
  , error_code& ec
    )
{ // {{{ bind_prefix implementation
    // parameters
    std::string key = req.get_name();
    boost::uint32_t prefix = req.get_locality_id();

    std::unique_lock<mutex_type> l(mutex_);

    component_id_table_type::left_map::iterator cit = component_ids_.left.find(key)
                                    , cend = component_ids_.left.end();

    // This is the first request for this key: bind it to the current value of
    // the type counter, then increment the counter.
    if (cit == cend)
    {
        if (HPX_UNLIKELY(!util::insert_checked(component_ids_.left.insert(
                std::make_pair(key, type_counter)), cit)))
        {
            l.unlock();

            HPX_THROWS_IF(ec, lock_error
              , "component_namespace::bind_prefix"
              , "component id table insertion failed due to a locking "
                "error or memory corruption");
            return response();
        }

        // If the insertion succeeded, we need to increment the type
        // counter.
        ++type_counter;
    }

    factory_table_type::iterator fit = factories_.find(cit->second)
                               , fend = factories_.end();

    if (fit != fend)
    {
        prefixes_type& prefixes = fit->second;
        prefixes_type::iterator pit = prefixes.find(prefix);

        if (pit != prefixes.end())
        {
            // Duplicate type registration for this locality.
            l.unlock();

            HPX_THROWS_IF(ec, duplicate_component_id
              , "component_namespace::bind_prefix"
              , boost::str(boost::format(
                    "component id is already registered for the given "
                    "locality, key(%1%), prefix(%2%), ctype(%3%)")
                    % key % prefix % cit->second));
            return response();
        }

        fit->second.insert(prefix);

        // This is the first registration for this locality; we still return
        // no_success to convey that another locality has already registered
        // this component type.
        LAGAS_(info) << (boost::format(
            "component_namespace::bind_prefix, key(%1%), prefix(%2%), "
            "ctype(%3%), response(no_success)")
            % key % prefix % cit->second);

        if (&ec != &throws)
            ec = make_success_code();

        return response(component_ns_bind_prefix
                      , cit->second
                      , no_success);
    }

    // Instead of creating a temporary and then inserting it, we insert
    // an empty set, then put the prefix into said set. This should
    // prevent a copy, though most compilers should be able to optimize
    // this without our help.
    if (HPX_UNLIKELY(!util::insert_checked(factories_.insert(
            std::make_pair(cit->second, prefixes_type())), fit)))
    {
        l.unlock();

        HPX_THROWS_IF(ec, lock_error
            , "component_namespace::bind_prefix"
            , "factory table insertion failed due to a locking "
              "error or memory corruption");
        return response();
    }

    fit->second.insert(prefix);

    LAGAS_(info) << (boost::format(
        "component_namespace::bind_prefix, key(%1%), prefix(%2%), ctype(%3%)")
        % key % prefix % cit->second);

    if (&ec != &throws)
        ec = make_success_code();

    return response(component_ns_bind_prefix, cit->second);
} // }}}
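// A minimal, hypothetical stand-in for util::insert_checked as it is used
// twice above: unpack the (iterator, bool) pair returned by an associative
// container's insert(), hand the iterator back through the out parameter,
// and report whether a new element was actually inserted.
template <typename Iterator>
bool insert_checked_sketch(std::pair<Iterator, bool> const& result, Iterator& it)
{
    it = result.first;
    return result.second;
}

// Usage mirrors the calls above, e.g.:
//   if (!insert_checked_sketch(factories_.insert(
//           std::make_pair(cit->second, prefixes_type())), fit)) { ... }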