Example #1
    bool coalescing_message_handler::flush(
        boost::unique_lock<mutex_type>& l,
        bool stop_buffering)
    {
        HPX_ASSERT(l.owns_lock());

        if (!stopped_ && stop_buffering) {
            stopped_ = true;

            util::unlock_guard<boost::unique_lock<mutex_type> > ul(l);
            timer_.stop();              // interrupt timer
        }

        if (buffer_.empty())
            return false;

        detail::message_buffer buff (buffer_.capacity());
        std::swap(buff, buffer_);

        l.unlock();

        HPX_ASSERT(NULL != pp_);
        buff(pp_);                   // 'invoke' the buffer

        return true;
    }
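Example #1 swaps the pending messages into a local buffer under the lock, then unlocks before invoking it, so the potentially slow send never runs while the mutex is held. The util::unlock_guard it uses inverts the usual RAII direction: it unlocks on construction and relocks on destruction. A minimal sketch of such a helper, assuming only that Lock provides lock()/unlock() (the real HPX type may differ in detail):

    // Sketch: unlocks the given lock on construction, relocks on
    // destruction. Works with std::unique_lock or boost::unique_lock.
    template <typename Lock>
    class unlock_guard
    {
    public:
        explicit unlock_guard(Lock& l) : lock_(l) { lock_.unlock(); }
        ~unlock_guard() { lock_.lock(); }

        unlock_guard(unlock_guard const&) = delete;
        unlock_guard& operator=(unlock_guard const&) = delete;

    private:
        Lock& lock_;
    };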
Example #2
void
condition_variable::wait( boost::unique_lock< mutex >& lock )
{
  assert( lock.owns_lock( ) );
  boost::unique_lock< mutex > local_lock( *lock.release( ), boost::adopt_lock );
  better_lock lock_std( mt );
  global_thr_pool.yield( [&]( coroutine running ) {
    lock_unlocker< better_lock > l_unlock_std_mt( lock_std );
    lock_unlocker< boost::unique_lock< mutex > > l_unlock_co_mt( local_lock );

    waiting_cors.emplace_back( std::move( running ) );
  } );

  // If this coroutine is resumed by a thread different from the one that
  // yielded, it is possible that the resuming thread has not yet observed
  // the changes made to the internal state of the 'lock' variable. That
  // special case would lead to a hard-to-debug race condition involving
  // the owns_lock member variable. This is why we use local_lock.
  assert( !local_lock.owns_lock( ) );
  lock = boost::unique_lock< mutex >( *local_lock.release( ) );
}
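Example #2 moves ownership of the caller's mutex into a local lock via release() plus adopt_lock, so the coroutine machinery never observes stale unique_lock bookkeeping. A minimal sketch of that handoff in isolation (standard Boost.Thread semantics: release() detaches the mutex from the lock object without unlocking it):

    #include <boost/thread/locks.hpp>
    #include <boost/thread/mutex.hpp>
    #include <cassert>

    // Sketch: transfer ownership of a locked mutex between two
    // unique_lock objects without ever unlocking the mutex itself.
    void handoff(boost::unique_lock<boost::mutex>& outer)
    {
        assert(outer.owns_lock());

        // release() detaches the still-locked mutex; adopt_lock tells
        // 'inner' that the current thread already owns it.
        boost::unique_lock<boost::mutex> inner(
            *outer.release(), boost::adopt_lock);
        assert(!outer.owns_lock() && inner.owns_lock());

        // ... work with 'inner', then hand ownership back ...
        outer = boost::unique_lock<boost::mutex>(
            *inner.release(), boost::adopt_lock);
    }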
Example #3
future_status result::wait_until(time_point const& wait_timeout_time,
						boost::unique_lock<boost::mutex>& lock) const
{
	if(ready(lock))
		return future_status::ready;

	while(!expired() && !(clock_type::now() > wait_timeout_time))
	{
		// note: don't wait for 0 ms, it might never time out
		if(m_condition.wait_for(lock, boost::chrono::milliseconds(1),
			boost::bind(&result::ready, this, boost::ref(lock))))
		{
			return future_status::ready;
		}

		if(expired() || (clock_type::now() > wait_timeout_time))
			break;

		lock.unlock();
		io_runner::poll_one(*m_io_service);
		lock.lock();
	}

	if(clock_type::now() > wait_timeout_time)
		return future_status::timeout;

	BOOST_ASSERT(expired());

	m_exception = boost::make_shared<remote_error>
		(remote_error::timeout, "remote call timeout");

	m_ready = true;
	return future_status::ready;
}
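The loop in Example #3 wakes every millisecond so it can interleave condition-variable waits with io_service polling. A sketch of the same slice-and-poll structure against std::condition_variable, where poll is a hypothetical stand-in for io_runner::poll_one:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Sketch: wait for 'ready' until 'deadline', periodically dropping
    // the lock so other work ('poll') can make progress in between.
    template <typename Pred, typename Poll>
    bool sliced_wait(std::unique_lock<std::mutex>& lock,
                     std::condition_variable& cond,
                     std::chrono::steady_clock::time_point deadline,
                     Pred ready, Poll poll)
    {
        while (std::chrono::steady_clock::now() < deadline)
        {
            // wait one slice; returns true as soon as ready() holds
            if (cond.wait_for(lock, std::chrono::milliseconds(1), ready))
                return true;

            lock.unlock();      // let the poller run without the lock
            poll();
            lock.lock();
        }
        return ready();         // final check under the lock
    }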
Example #4
int main()
{
  boost::mutex m;
  m.lock();
#if ! defined(BOOST_NO_CXX11_AUTO_DECLARATIONS)
  auto
#else
  boost::unique_lock<boost::mutex>
#endif
  lk = boost::make_unique_lock(m, boost::adopt_lock);
  BOOST_TEST(lk.mutex() == &m);
  BOOST_TEST(lk.owns_lock() == true);

  return boost::report_errors();
}
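For comparison, the same adopt_lock assertions written against the standard library, which has no make_unique_lock factory but supports the identical construction (C++11):

    #include <cassert>
    #include <mutex>

    int main()
    {
        std::mutex m;
        m.lock();   // acquire manually first ...

        // ... then hand ownership to the guard without relocking
        std::unique_lock<std::mutex> lk(m, std::adopt_lock);
        assert(lk.mutex() == &m);
        assert(lk.owns_lock());
        return 0;
    }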
Example #5
    // assumes that this thread has acquired l
    void resume(boost::unique_lock<mutex_type>& l)
    { // {{{
        HPX_ASSERT(l.owns_lock());

        // resume as many waiting LCOs as possible
        while (!thread_queue_.empty() && !value_queue_.empty())
        {
            ValueType value = value_queue_.front().val_;

            HPX_ASSERT(0 != value_queue_.front().count_);

            if (1 == value_queue_.front().count_)
            {
                value_queue_.front().count_ = 0;
                value_queue_.pop_front();
            }
            else
                --value_queue_.front().count_;

            naming::id_type id = thread_queue_.front().id_;
            thread_queue_.front().id_ = naming::invalid_id;
            thread_queue_.pop_front();

            {
                util::unlock_guard<boost::unique_lock<mutex_type> > ul(l);

                // set the LCO's result
                applier::trigger(id, std::move(value));
            }
        }
    } // }}}
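Stripped of the HPX types, resume() pairs each waiting consumer with one unit of a reference-counted value. A sketch of that pairing loop with plain standard containers, where trigger stands in for applier::trigger (both names here are illustrative only):

    #include <cstddef>
    #include <deque>
    #include <functional>
    #include <utility>

    // Sketch: match queued (value, count) entries against queued waiters,
    // popping a value entry only once its count is exhausted.
    template <typename Value>
    void resume_waiters(
        std::deque<std::pair<Value, std::size_t>>& values,
        std::deque<std::function<void(Value)>>& waiters)
    {
        while (!waiters.empty() && !values.empty())
        {
            Value v = values.front().first;
            if (--values.front().second == 0)
                values.pop_front();

            auto trigger = std::move(waiters.front());
            waiters.pop_front();
            trigger(std::move(v));   // deliver outside the queues
        }
    }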
Example #6
File: receiver.hpp  Project: devangb/hpx
        connection_ptr accept_locked(boost::unique_lock<mutex_type> & header_lock)
        {
            connection_ptr res;
            util::mpi_environment::scoped_try_lock l;

            if(l.locked)
            {
                MPI_Status status;
                if(request_done_locked(hdr_request_, &status))
                {
                    header h = new_header();
                    l.unlock();
                    header_lock.unlock();
                    res.reset(
                        new connection_type(
                            status.MPI_SOURCE
                          , h
                          , pp_
                        )
                    );
                    return res;
                }
            }
            return res;
        }
Example #7
            void signal_locked(boost::int64_t count,
                boost::unique_lock<mutex_type> l)
            {
                HPX_ASSERT(l.owns_lock());

                mutex_type* mtx = l.mutex();
                // release no more threads than we get resources
                value_ += count;
                for (boost::int64_t i = 0; value_ >= 0 && i < count; ++i)
                {
                    // notify_one() returns false if no more threads are
                    // waiting
                    if (!cond_.notify_one(std::move(l)))
                        break;
                    l = boost::unique_lock<mutex_type>(*mtx);
                }
            }
Example #8
void Bank::setPlanet(boost::unique_lock<boost::mutex>& lock, int8_t planet) {
    planet_ = planet;

    auto dispatcher = GetEventDispatcher();

    lock.unlock();

    dispatcher->DispatchMainThread(std::make_shared<CreatureObjectEvent>("CreatureObject::PersistHomeBank", owner_->GetCreature()));
}
Example #9
void Bank::setCredits(boost::unique_lock<boost::mutex>& lock, uint32 credits) {
    credits_ = credits;
    auto dispatcher = GetEventDispatcher();

    lock.unlock();

    dispatcher->Dispatch(std::make_shared<CreatureObjectEvent>("CreatureObject::BankCredits", owner_->GetCreature()));
    dispatcher->DispatchMainThread(std::make_shared<CreatureObjectEvent>("CreatureObject::PersistBankCredits", owner_->GetCreature()));
}
Example #10
            void wait_locked(boost::int64_t count,
                boost::unique_lock<mutex_type>& l)
            {
                HPX_ASSERT(l.owns_lock());

                while (value_ < count)
                {
                    cond_.wait(l, "counting_semaphore::wait_locked");
                }
                value_ -= count;
            }
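Together, signal_locked and wait_locked implement a counting semaphore. A minimal sketch of the same idea over std::mutex and std::condition_variable (the HPX version differs in that its notify_one consumes the lock, hence the re-lock inside the signal loop above):

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    // Sketch: counting semaphore in the spirit of the
    // signal_locked/wait_locked pair above.
    class counting_semaphore
    {
    public:
        explicit counting_semaphore(std::int64_t value = 0)
          : value_(value) {}

        void signal(std::int64_t count = 1)
        {
            std::unique_lock<std::mutex> l(mtx_);
            value_ += count;
            // release no more threads than resources were added
            for (std::int64_t i = 0; i < count; ++i)
                cond_.notify_one();
        }

        void wait(std::int64_t count = 1)
        {
            std::unique_lock<std::mutex> l(mtx_);
            cond_.wait(l, [&] { return value_ >= count; });
            value_ -= count;
        }

    private:
        std::mutex mtx_;
        std::condition_variable cond_;
        std::int64_t value_;
    };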
Example #11
bool StatelessReader::change_received(CacheChange_t* change, boost::unique_lock<boost::recursive_mutex> &lock)
{
    // Only make the change visible if there is no other change with a bigger sequence number.
    // TODO: Review whether this needs to be included.
    if(!mp_history->thereIsUpperRecordOf(change->writerGUID, change->sequenceNumber))
    {
        if(mp_history->received_change(change, 0))
        {
            if(getListener() != nullptr)
            {
                lock.unlock();
                getListener()->onNewCacheChangeAdded((RTPSReader*)this,change);
                lock.lock();
            }

            mp_history->postSemaphore();
            return true;
        }
    }

    return false;
}
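Examples #8, #9 and #11 all follow the same discipline: drop your own lock before running foreign code (listeners, event dispatchers), so that code can never deadlock against your mutex. A minimal sketch of the pattern, with the usual caveat that any state read before the unlock may be stale afterwards:

    #include <functional>
    #include <mutex>

    // Sketch: never hold your own mutex while invoking a user callback.
    void notify_unlocked(std::unique_lock<std::mutex>& lock,
                         std::function<void()> const& callback)
    {
        lock.unlock();   // the callback may take arbitrary locks of its own
        callback();
        lock.lock();     // re-establish the caller's locking invariant
        // note: protected state may have changed while unlocked
    }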
Example #12
        bool add_new_always(std::size_t& added, thread_queue* addfrom,
            boost::unique_lock<mutex_type> &lk, bool steal = false)
        {
            HPX_ASSERT(lk.owns_lock());

#ifdef HPX_HAVE_THREAD_CREATION_AND_CLEANUP_RATES
            util::tick_counter tc(add_new_time_);
#endif

            if (0 == addfrom->new_tasks_count_.load(boost::memory_order_relaxed))
                return false;

            // create new threads from pending tasks (if appropriate)
            boost::int64_t add_count = -1;                  // default is no constraint

            // if we are desperate (no work in the queues), add some even if the
            // map holds more than max_count
            if (HPX_LIKELY(max_count_)) {
                std::size_t count = thread_map_.size();
                if (max_count_ >= count + min_add_new_count) { //-V104
                    HPX_ASSERT(max_count_ - count <
                        static_cast<std::size_t>((std::numeric_limits
                            <boost::int64_t>::max)()));
                    add_count = static_cast<boost::int64_t>(max_count_ - count);
                    if (add_count < min_add_new_count)
                        add_count = min_add_new_count;
                    if (add_count > max_add_new_count)
                        add_count = max_add_new_count;
                }
                else if (work_items_.empty()) {
                    add_count = min_add_new_count;    // add this number of threads
                    max_count_ += min_add_new_count;  // increase max_count //-V101
                }
                else {
                    return false;
                }
            }

            std::size_t addednew = add_new(add_count, addfrom, lk, steal);
            added += addednew;
            return addednew != 0;
        }
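The min/max adjustment of add_count above is the familiar clamping idiom; with C++17 it collapses to std::clamp (a sketch, assuming min_add <= max_add):

    #include <algorithm>
    #include <cstdint>

    // Sketch: equivalent of the two if-statements that bound add_count.
    std::int64_t clamp_add_count(std::int64_t requested,
                                 std::int64_t min_add, std::int64_t max_add)
    {
        return std::clamp(requested, min_add, max_add);
    }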
Example #13
        bool add_new_if_possible(std::size_t& added, thread_queue* addfrom,
            boost::unique_lock<mutex_type> &lk, bool steal = false)
        {
            HPX_ASSERT(lk.owns_lock());

#ifdef HPX_HAVE_THREAD_CREATION_AND_CLEANUP_RATES
            util::tick_counter tc(add_new_time_);
#endif

            if (0 == addfrom->new_tasks_count_.load(boost::memory_order_relaxed))
                return false;

            // create new threads from pending tasks (if appropriate)
            boost::int64_t add_count = -1;                  // default is no constraint

            // if the map doesn't hold max_count threads yet add some
            // FIXME: why do we have this test? can max_count_ ever be zero?
            if (HPX_LIKELY(max_count_)) {
                std::size_t count = thread_map_.size();
                if (max_count_ >= count + min_add_new_count) { //-V104
                    HPX_ASSERT(max_count_ - count <
                        static_cast<std::size_t>((std::numeric_limits
                            <boost::int64_t>::max)()));
                    add_count = static_cast<boost::int64_t>(max_count_ - count);
                    if (add_count < min_add_new_count)
                        add_count = min_add_new_count;
                }
                else {
                    return false;
                }
            }

            std::size_t addednew = add_new(add_count, addfrom, lk, steal);
            added += addednew;
            return addednew != 0;
        }
Example #14
			log_lock() : lock(thread_support::mutex, boost::get_system_time() + boost::posix_time::seconds(2)) {
				if (!lock.owns_lock())
					NSC_LOG_ERROR("Failed to get mutex: thread_locker");
			}
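Example #14 gives up after two seconds rather than blocking forever on a wedged logging mutex. A sketch of the same timed acquisition with the standard library, which requires std::timed_mutex rather than a plain std::mutex:

    #include <chrono>
    #include <mutex>

    // Sketch: try to acquire for at most 2 seconds; owns_lock() reports
    // whether the mutex was actually obtained.
    bool try_log(std::timed_mutex& m)
    {
        std::unique_lock<std::timed_mutex> lock(m, std::chrono::seconds(2));
        if (!lock.owns_lock())
            return false;   // report the failure instead of hanging
        // ... write the log entry under the lock ...
        return true;
    }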
Example #15
template <typename Mutex>
void assert_owns_lock(boost::unique_lock<Mutex> const& l, long)
{
    HPX_ASSERT(l.owns_lock());
}
Example #16
void f()
{
#if defined BOOST_THREAD_USES_CHRONO
  {
    time_point t0 = Clock::now();
#if ! defined(BOOST_NO_CXX11_AUTO_DECLARATIONS)
  auto
#else
  boost::unique_lock<boost::mutex>
#endif
    lk = boost::make_unique_lock(m, boost::try_to_lock);
    BOOST_TEST(lk.owns_lock() == false);
    time_point t1 = Clock::now();
    ns d = t1 - t0 - ms(250);
    // This test is spurious as it depends on the time the thread system switches the threads
    BOOST_TEST(d < ns(50000000)+ms(1000)); // within 50ms
  }
  {
    time_point t0 = Clock::now();
#if ! defined(BOOST_NO_CXX11_AUTO_DECLARATIONS)
  auto
#else
  boost::unique_lock<boost::mutex>
#endif
    lk = boost::make_unique_lock(m, boost::try_to_lock);
    BOOST_TEST(lk.owns_lock() == false);
    time_point t1 = Clock::now();
    ns d = t1 - t0 - ms(250);
    // This test is spurious as it depends on the time the thread system switches the threads
    BOOST_TEST(d < ns(50000000)+ms(1000)); // within 50ms
  }
  {
    time_point t0 = Clock::now();
#if ! defined(BOOST_NO_CXX11_AUTO_DECLARATIONS)
  auto
#else
  boost::unique_lock<boost::mutex>
#endif
    lk = boost::make_unique_lock(m, boost::try_to_lock);
    BOOST_TEST(lk.owns_lock() == false);
    time_point t1 = Clock::now();
    ns d = t1 - t0 - ms(250);
    // This test is spurious as it depends on the time the thread system switches the threads
    BOOST_TEST(d < ns(50000000)+ms(1000)); // within 50ms
  }
  {
    time_point t0 = Clock::now();
    while (true)
    {
#if ! defined(BOOST_NO_CXX11_AUTO_DECLARATIONS)
      auto
#else
      boost::unique_lock<boost::mutex>
#endif
      lk = boost::make_unique_lock(m, boost::try_to_lock);
      if (lk.owns_lock()) break;
    }
    time_point t1 = Clock::now();
    ns d = t1 - t0 - ms(250);
    // This test is spurious as it depends on the time the thread system switches the threads
    BOOST_TEST(d < ns(50000000)+ms(1000)); // within 50ms
  }
#else
//  time_point t0 = Clock::now();
//  {
//    boost::unique_lock<boost::mutex> lk(m, boost::try_to_lock);
//    BOOST_TEST(lk.owns_lock() == false);
//  }
//  {
//    boost::unique_lock<boost::mutex> lk(m, boost::try_to_lock);
//    BOOST_TEST(lk.owns_lock() == false);
//  }
//  {
//    boost::unique_lock<boost::mutex> lk(m, boost::try_to_lock);
//    BOOST_TEST(lk.owns_lock() == false);
//  }
  while (true)
  {
#if ! defined(BOOST_NO_CXX11_AUTO_DECLARATIONS)
  auto
#else
  boost::unique_lock<boost::mutex>
#endif
    lk = boost::make_unique_lock(m, boost::try_to_lock);
    if (lk.owns_lock()) break;
  }
  //time_point t1 = Clock::now();
  //ns d = t1 - t0 - ms(250);
  // This test is spurious as it depends on the time the thread system switches the threads
  //BOOST_TEST(d < ns(50000000)+ms(1000)); // within 50ms
#endif
}
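The acquisition loop at the end of f() spins on try_to_lock until it succeeds. The standard-library equivalent (again without make_unique_lock) is a sketch like:

    #include <mutex>

    // Sketch: spin until the mutex is acquired, constructing a fresh
    // non-blocking unique_lock on each attempt.
    void spin_acquire(std::mutex& m)
    {
        while (true)
        {
            std::unique_lock<std::mutex> lk(m, std::try_to_lock);
            if (lk.owns_lock())
                break;   // lk goes out of scope here, releasing the mutex
        }
    }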
Example #17
    bool thread_pool<Scheduler>::run(boost::unique_lock<boost::mutex>& l,
        std::size_t num_threads)
    {
        HPX_ASSERT(l.owns_lock());

        LTM_(info) //-V128
            << "thread_pool::run: " << pool_name_
            << " number of processing units available: " //-V128
            << threads::hardware_concurrency();
        LTM_(info) //-V128
            << "thread_pool::run: " << pool_name_
            << " creating " << num_threads << " OS thread(s)"; //-V128

        if (0 == num_threads) {
            HPX_THROW_EXCEPTION(bad_parameter,
                "thread_pool::run", "number of threads is zero");
        }

#if defined(HPX_HAVE_THREAD_CUMULATIVE_COUNTS) && \
    defined(HPX_HAVE_THREAD_IDLE_RATES)
        // scale timestamps to nanoseconds
        boost::uint64_t base_timestamp = util::hardware::timestamp();
        boost::uint64_t base_time = util::high_resolution_clock::now();
        boost::uint64_t curr_timestamp = util::hardware::timestamp();
        boost::uint64_t curr_time = util::high_resolution_clock::now();

        while ((curr_time - base_time) <= 100000)
        {
            curr_timestamp = util::hardware::timestamp();
            curr_time = util::high_resolution_clock::now();
        }

        if (curr_timestamp - base_timestamp != 0)
        {
            timestamp_scale_ = double(curr_time - base_time) /
                double(curr_timestamp - base_timestamp);
        }

        LTM_(info)
            << "thread_pool::run: " << pool_name_
            << " timestamp_scale: " << timestamp_scale_; //-V128
#endif

        if (!threads_.empty() || sched_.has_reached_state(state_running))
            return true;    // do nothing if already running

        executed_threads_.resize(num_threads);
        executed_thread_phases_.resize(num_threads);
        tfunc_times_.resize(num_threads);
        exec_times_.resize(num_threads);

        try {
            HPX_ASSERT(startup_.get() == 0);
            startup_.reset(
                new boost::barrier(static_cast<unsigned>(num_threads+1))
            );

            // run threads and wait for initialization to complete
            sched_.set_all_states(state_running);

            topology const& topology_ = get_topology();

            std::size_t thread_num = num_threads;
            while (thread_num-- != 0) {
                threads::mask_cref_type mask =
                    sched_.Scheduler::get_pu_mask(topology_, thread_num);

                LTM_(info) //-V128
                    << "thread_pool::run: " << pool_name_
                    << " create OS thread " << thread_num //-V128
                    << ": will run on processing units within this mask: "
#if !defined(HPX_WITH_MORE_THAN_64_THREADS) || \
    (defined(HPX_HAVE_MAX_CPU_COUNT) && HPX_HAVE_MAX_CPU_COUNT <= 64)
                    << std::hex << "0x" << mask;
#else
                    << "0b" << mask;
#endif

                // create a new thread
                threads_.push_back(new boost::thread(
                        util::bind(&thread_pool::thread_func, this, thread_num,
                            boost::ref(topology_), boost::ref(*startup_))
                    ));

                // set the new threads affinity (on Windows systems)
                if (any(mask))
                {
                    error_code ec(lightweight);
                    topology_.set_thread_affinity_mask(threads_.back(), mask, ec);
                    if (ec)
                    {
                        LTM_(warning) //-V128
                            << "thread_pool::run: " << pool_name_
                            << " setting thread affinity on OS thread " //-V128
                            << thread_num << " failed with: "
                            << ec.get_message();
                    }
                }
                else
                {
                    LTM_(debug) //-V128
                        << "thread_pool::run: " << pool_name_
                        << " setting thread affinity on OS thread " //-V128
                        << thread_num << " was explicitly disabled.";
                }
            }

            // the main thread needs to have a unique thread_num
            init_tss(num_threads);
            startup_->wait();
        }
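The snippet above ends inside the try block, but the rendezvous idea is visible: run() starts every worker and then blocks on startup_->wait() until each one has checked in. A minimal sketch of that startup barrier with C++20's std::barrier (boost::barrier behaves analogously for this purpose):

    #include <barrier>
    #include <cstddef>
    #include <thread>
    #include <vector>

    // Sketch: N workers plus the launching thread rendezvous once, so the
    // launcher only proceeds after all workers finished initialization.
    void start_pool(std::size_t num_threads)
    {
        std::barrier startup(static_cast<std::ptrdiff_t>(num_threads + 1));
        std::vector<std::jthread> threads;

        for (std::size_t i = 0; i != num_threads; ++i) {
            threads.emplace_back([&startup] {
                // ... per-thread initialization would go here ...
                startup.arrive_and_wait();   // check in
                // ... the scheduling loop would run here ...
            });
        }

        startup.arrive_and_wait();   // returns once every worker checked in
    }   // jthreads join on destruction, before 'startup' is destroyed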
Example #18
        ///////////////////////////////////////////////////////////////////////
        // add new threads if there is some amount of work available
        std::size_t add_new(boost::int64_t add_count, thread_queue* addfrom,
            boost::unique_lock<mutex_type> &lk, bool steal = false)
        {
            HPX_ASSERT(lk.owns_lock());

            if (HPX_UNLIKELY(0 == add_count))
                return 0;

            std::size_t added = 0;
            task_description* task = 0;
            while (add_count-- && addfrom->new_tasks_.pop(task, steal))
            {
#ifdef HPX_HAVE_THREAD_QUEUE_WAITTIME
                if (maintain_queue_wait_times) {
                    addfrom->new_tasks_wait_ +=
                        util::high_resolution_clock::now() - util::get<2>(*task);
                    ++addfrom->new_tasks_wait_count_;
                }
#endif
                --addfrom->new_tasks_count_;

                // measure thread creation time
                util::block_profiler_wrapper<add_new_tag> bp(add_new_logger_);

                // create the new thread
                threads::thread_init_data& data = util::get<0>(*task);
                thread_state_enum state = util::get<1>(*task);
                threads::thread_id_type thrd;

                create_thread_object(thrd, data, state, lk);

                delete task;

                // add the new entry to the map of all threads
                std::pair<thread_map_type::iterator, bool> p =
                    thread_map_.insert(thrd);

                if (HPX_UNLIKELY(!p.second)) {
                    HPX_THROW_EXCEPTION(hpx::out_of_memory,
                        "threadmanager::add_new",
                        "Couldn't add new thread to the thread map");
                    return 0;
                }
                ++thread_map_count_;

                // only insert the thread into the work-items queue if it is in
                // pending state
                if (state == pending) {
                    // pushing the new thread into the pending queue of the
                    // specified thread_queue
                    ++added;
                    schedule_thread(thrd.get());
                }

                // this thread has to be in the map now
                HPX_ASSERT(thread_map_.find(thrd.get()) != thread_map_.end());
                HPX_ASSERT(thrd->get_pool() == &memory_pool_);
            }

            if (added) {
                LTM_(debug) << "add_new: added " << added << " tasks to queues"; //-V128
            }
            return added;
        }
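Stripped of the HPX bookkeeping, add_new is a budgeted drain of a task queue: pop at most add_count tasks and turn each into a scheduled thread. A sketch of that skeleton with standard containers, keeping the original's "negative budget means no constraint" trick:

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <queue>
    #include <utility>

    using task = std::function<void()>;

    // Sketch: pop at most 'budget' tasks (budget < 0 = effectively
    // unbounded, like add_count == -1 above) and hand each to
    // 'schedule'; returns the number processed.
    std::size_t drain_tasks(std::queue<task>& tasks, std::int64_t budget,
                            std::function<void(task)> const& schedule)
    {
        std::size_t added = 0;
        while (budget-- && !tasks.empty())
        {
            schedule(std::move(tasks.front()));
            tasks.pop();
            ++added;
        }
        return added;
    }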