Example #1
int ThreadPool::threadReaper ()
    {
    TRACE_CALL;

    VxCritSec cs (m_threadCountLock);

    // Idle workers are those not needed to service the queued work.
    int unusedThreads = threadCount () - queueSize ();

    // Never reap the pool below its configured minimum size.
    if ((threadCount () - unusedThreads) < minThreads ())
	unusedThreads -= minThreads ();

    if (queueIsFull () || unusedThreads <= 0)
	return 0;
    
    while (unusedThreads-- > 0)
	threadRemove ();

    return 0;
    }
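threadReaper () works out how many idle workers can be removed from threadCount () minus queueSize (), while keeping at least minThreads () threads alive. The same bookkeeping can be sketched in isolation; reapableThreads and its plain int parameters below are hypothetical stand-ins for the ThreadPool accessors, not part of the original class:

#include <algorithm>

// How many idle workers can be reaped without dropping below the minimum
// pool size.  threadCount, queueSize and minThreads are plain values
// standing in for the ThreadPool accessors of the same names.
int reapableThreads (int threadCount, int queueSize, int minThreads)
{
    const int unused    = threadCount - queueSize;    // workers with nothing queued
    const int removable = threadCount - minThreads;   // room above the floor
    return std::max (0, std::min (unused, removable));
}

The original also bails out early when the queue is full; the sketch leaves that check to the caller.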
Example #2
int ThreadPool::queueFullHandler ()
    {
    // If we have no scavenger we cannot dynamically add threads.

    if (m_thrScavenger == 0)
	return 0;

    if (threadCount () < maxThreads ())
	return threadAdd ();
    else
	return 0;
    }
Example #3
int ThreadPool::close ()
    {
    TRACE_CALL;

    DELZERO (m_thrScavenger);

    removeAll ();

    // Post a NULL event to all possible threads...
    for (int i=0; i < threadCount (); ++i)
	threadRemove ();
    
    // ...and wait for threads to terminate...
    cout << "waiting for threads..." << endl;
    while (threadCount () > 0)
        {
        cout << "...sleeping..." << endl;
        ::taskDelay (1);
        }

    return 0;
    }
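close () drains the pool by posting one termination ("NULL") event per worker and then polling threadCount () until it reaches zero (::taskDelay is the VxWorks sleep call). A minimal standard-library sketch of the same shutdown pattern follows; MiniPool and its members are hypothetical and only illustrate the post-a-sentinel-per-thread idea, they are not the ThreadPool API:

#include <condition_variable>
#include <cstddef>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

// Minimal pool used only to illustrate the shutdown pattern above:
// post one termination event per worker, then wait for every worker to exit.
struct MiniPool {
    std::vector<std::thread> workers;
    std::deque<bool> events;          // 'false' acts as the termination event
    std::mutex m;
    std::condition_variable cv;

    explicit MiniPool(int n) {
        for (int i = 0; i < n; ++i)
            workers.emplace_back([this] {
                for (;;) {
                    std::unique_lock<std::mutex> lock(m);
                    cv.wait(lock, [&] { return !events.empty(); });
                    bool ev = events.front();
                    events.pop_front();
                    if (!ev)          // termination event: this worker exits
                        return;
                    // a 'true' event would represent real work; ignored here
                }
            });
    }

    void close() {
        {   // post a termination event to every possible thread...
            std::lock_guard<std::mutex> lock(m);
            for (std::size_t i = 0; i < workers.size(); ++i)
                events.push_back(false);
        }
        cv.notify_all();
        // ...and wait for the threads to terminate
        for (std::thread &t : workers)
            t.join();
    }
};

Joining the threads replaces the busy-wait on threadCount (); the polling loop in close () above is what you fall back to when, as in the VxWorks pool, the worker tasks are not directly joinable.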
Example #4
bool WIOService::requestBlockedThread()
{
#ifdef WT_THREADED
  boost::mutex::scoped_lock l(impl_->blockedThreadMutex_);
  if (impl_->blockedThreadCounter_ >= threadCount() - 1)
    return false;
  else {
    impl_->blockedThreadCounter_++;
    return true;
  }
#else
  return false;
#endif
}
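requestBlockedThread () grants a thread permission to block only while at least one other thread stays free to serve requests, tracked as a counter under a mutex. A generic sketch of that pattern is shown below; BlockedSlots and its members are hypothetical and not part of Wt's WIOService:

#include <mutex>

// Hypothetical reservation tracker: at least one thread must stay unblocked.
class BlockedSlots {
public:
    explicit BlockedSlots(int threadCount) : threadCount_(threadCount) {}

    // Try to reserve a "blocked" slot; refuse if granting it would leave
    // only a single thread unblocked.
    bool request() {
        std::lock_guard<std::mutex> lock(mutex_);
        if (blocked_ >= threadCount_ - 1)
            return false;
        ++blocked_;
        return true;
    }

    // Release a previously granted slot once the blocking call returns.
    void release() {
        std::lock_guard<std::mutex> lock(mutex_);
        --blocked_;
    }

private:
    std::mutex mutex_;
    int blocked_ = 0;
    int threadCount_;
};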
Example #5
  void TaskScheduler::create(size_t numThreads, bool set_affinity, bool start_threads)
  {
    assert(numThreads);

    /* first terminate threads in case we configured them */
    if (g_tbb_threads_initialized) {
      g_tbb_threads.terminate();
      g_tbb_threads_initialized = false;
    }
    
    /* only set affinity if requested by the user */
#if TBB_INTERFACE_VERSION >= 9000 // affinity not properly supported by older TBB versions
    if (set_affinity)
      tbb_affinity.observe(true);
#endif 
    
    /* now either keep default settings or configure number of threads */
    if (numThreads == std::numeric_limits<size_t>::max()) {
      g_tbb_threads_initialized = false;
      numThreads = threadCount();
      //numThreads = tbb::task_scheduler_init::default_num_threads();
    } else {
      g_tbb_threads_initialized = true;
      const size_t max_concurrency = threadCount();
      if (numThreads > max_concurrency) numThreads = max_concurrency;
      g_tbb_threads.initialize(int(numThreads));
    }

    /* start worker threads */
    if (start_threads) 
    {
      BarrierSys barrier(numThreads);
      tbb::parallel_for(size_t(0), size_t(numThreads), size_t(1), [&] ( size_t i ) {
          barrier.wait();
        });
    }
  }
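create () treats numThreads == SIZE_MAX as "keep the defaults" and otherwise clamps the request to threadCount () before initializing TBB. The clamping step on its own can be sketched with just the standard library; resolveThreadCount is hypothetical and std::thread::hardware_concurrency () merely stands in for threadCount ():

#include <algorithm>
#include <cstddef>
#include <limits>
#include <thread>

// Resolve a requested worker count: SIZE_MAX means "use the platform
// default", anything else is clamped to the available hardware threads.
std::size_t resolveThreadCount(std::size_t requested)
{
    const std::size_t hw =
        std::max<std::size_t>(1, std::thread::hardware_concurrency());
    if (requested == std::numeric_limits<std::size_t>::max())
        return hw;
    return std::min(requested, hw);
}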
Example #6
void
Scheduler::run()
{
    t_scheduler = this;
    if (gettid() != m_rootThread) {
        // Running in own thread
        t_fiber = Fiber::getThis().get();
    } else {
        // Hijacked a thread
        MORDOR_ASSERT(t_fiber.get() == Fiber::getThis().get());
    }
    Fiber::ptr idleFiber(new Fiber(boost::bind(&Scheduler::idle, this)));
    MORDOR_LOG_VERBOSE(g_log) << this << " starting thread with idle fiber " << idleFiber;
    Fiber::ptr dgFiber;
    // use a vector for O(1) .size()
    std::vector<FiberAndThread> batch(m_batchSize);
    bool isActive = false;
    while (true) {
        batch.clear();
        bool dontIdle = false;
        bool tickleMe = false;
        {
            boost::mutex::scoped_lock lock(m_mutex);
            // Kill ourselves off if needed
            if (m_threads.size() > m_threadCount && gettid() != m_rootThread) {
                // Accounting
                if (isActive)
                    --m_activeThreadCount;
                // Kill off the idle fiber
                try {
                    throw boost::enable_current_exception(
                        OperationAbortedException());
                } catch(...) {
                    idleFiber->inject(boost::current_exception());
                }
                // Detach our thread
                for (std::vector<boost::shared_ptr<Thread> >
                    ::iterator it = m_threads.begin();
                    it != m_threads.end();
                    ++it)
                    if ((*it)->tid() == gettid()) {
                        m_threads.erase(it);
                        if (m_threads.size() > m_threadCount)
                            tickle();
                        return;
                    }
                MORDOR_NOTREACHED();
            }

            std::list<FiberAndThread>::iterator it(m_fibers.begin());
            while (it != m_fibers.end()) {
                // If we've met our batch size, and we're not checking to see
                // if we need to tickle another thread, then break
                if ( (tickleMe || m_activeThreadCount == threadCount()) &&
                    batch.size() == m_batchSize)
                    break;

                if (it->thread != emptytid() && it->thread != gettid()) {
                    MORDOR_LOG_DEBUG(g_log) << this
                        << " skipping item scheduled for thread "
                        << it->thread;

                    // Wake up another thread to hopefully service this
                    tickleMe = true;
                    dontIdle = true;
                    ++it;
                    continue;
                }
                MORDOR_ASSERT(it->fiber || it->dg);
                // This fiber is still executing; probably just a race
                // condition where it needs to yield on one thread
                // before running on another thread
                if (it->fiber && it->fiber->state() == Fiber::EXEC) {
                    MORDOR_LOG_DEBUG(g_log) << this
                        << " skipping executing fiber " << it->fiber;
                    ++it;
                    dontIdle = true;
                    continue;
                }
                // We were just checking if there is more work; there is, so
                // set the flag and don't actually take this piece of work
                if (batch.size() == m_batchSize) {
                    tickleMe = true;
                    break;
                }
                batch.push_back(*it);
                it = m_fibers.erase(it);
                if (!isActive) {
                    ++m_activeThreadCount;
                    isActive = true;
                }
            }
            if (batch.empty() && isActive) {
                --m_activeThreadCount;
                isActive = false;
            }
        }
        if (tickleMe)
            tickle();
        MORDOR_LOG_DEBUG(g_log) << this
            << " got " << batch.size() << " fiber/dgs to process (max: "
            << m_batchSize << ", active: " << isActive << ")";
        MORDOR_ASSERT(isActive == !batch.empty());
        if (!batch.empty()) {
            std::vector<FiberAndThread>::iterator it;
            for (it = batch.begin(); it != batch.end(); ++it) {
                Fiber::ptr f = it->fiber;
                boost::function<void ()> dg = it->dg;

                try {
                    if (f && f->state() != Fiber::TERM) {
                        MORDOR_LOG_DEBUG(g_log) << this << " running " << f;
                        f->yieldTo();
                    } else if (dg) {
                        if (!dgFiber)
                            dgFiber.reset(new Fiber(dg));
                        dgFiber->reset(dg);
                        MORDOR_LOG_DEBUG(g_log) << this << " running " << dg;
                        dgFiber->yieldTo();
                        if (dgFiber->state() != Fiber::TERM)
                            dgFiber.reset();
                        else
                            dgFiber->reset(NULL);
                    }
                } catch (...) {
                    MORDOR_LOG_FATAL(Log::root())
                        << boost::current_exception_diagnostic_information();
                    throw;
                }
            }
            continue;
        }
        if (dontIdle)
            continue;

        if (idleFiber->state() == Fiber::TERM) {
            MORDOR_LOG_DEBUG(g_log) << this << " idle fiber terminated";
            if (gettid() == m_rootThread)
                m_callingFiber.reset();
            // Unblock the next thread
            if (threadCount() > 1)
                tickle();
            return;
        }
        MORDOR_LOG_DEBUG(g_log) << this << " idling";
        idleFiber->call();
    }
}
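Scheduler::run () spends most of its time pulling up to m_batchSize runnable items out of the shared m_fibers list under the mutex, skipping entries pinned to another thread. A much-reduced sketch of just that draining step follows; WorkItem, takeBatch and the tid convention are hypothetical simplifications of Mordor's FiberAndThread handling:

#include <cstddef>
#include <list>
#include <mutex>
#include <vector>

// Hypothetical work item: tid == 0 means "any thread may run it".
struct WorkItem {
    int tid = 0;
};

// Drain up to batchSize items that this thread (selfTid) is allowed to run,
// leaving items pinned to other threads in the shared list.
std::vector<WorkItem> takeBatch(std::list<WorkItem> &shared, std::mutex &m,
                                std::size_t batchSize, int selfTid)
{
    std::vector<WorkItem> batch;
    std::lock_guard<std::mutex> lock(m);
    for (auto it = shared.begin();
         it != shared.end() && batch.size() < batchSize; ) {
        if (it->tid != 0 && it->tid != selfTid) {
            ++it;                    // scheduled for another thread; skip it
            continue;
        }
        batch.push_back(*it);
        it = shared.erase(it);
    }
    return batch;
}

The real scheduler additionally skips fibers still in the EXEC state and decides whether to tickle () another thread; both checks were dropped here to keep the sketch short.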