Example #1
void 
MyObjectI::amdAdd_async(const Test::AMD_MyObject_amdAddPtr& cb, int x, int y, const Ice::Current&)
{
    class ThreadI : public Thread
    {
    public:
        
        ThreadI(const Test::AMD_MyObject_amdAddPtr& cb, int x, int y) :
            _cb(cb),
            _x(x),
            _y(y)
        {
        }

        void run()
        {
            ThreadControl::sleep(Time::milliSeconds(10));
            _cb->ice_response(_x + _y);
        }
    private:
        Test::AMD_MyObject_amdAddPtr _cb;
        int _x;
        int _y;
    };

    ThreadPtr thread = new ThreadI(cb, x, y);
    thread->start().detach();
}
Example #2
void 
MyObjectI::amdAddWithRetry_async(const Test::AMD_MyObject_amdAddWithRetryPtr& cb, int x, int y, const Ice::Current& current)
{
    class ThreadI : public Thread
    {
    public:
        
        ThreadI(const Test::AMD_MyObject_amdAddWithRetryPtr& cb, int x, int y) :
            _cb(cb),
            _x(x),
            _y(y)
        {
        }

        void run()
        {
            ThreadControl::sleep(Time::milliSeconds(10));
            _cb->ice_response(_x + _y);
        }
    private:
        Test::AMD_MyObject_amdAddWithRetryPtr _cb;
        int _x;
        int _y;
    };

    ThreadPtr thread = new ThreadI(cb, x, y);
    thread->start().detach();

    Ice::Context::const_iterator p = current.ctx.find("retry");
    
    if(p == current.ctx.end() || p->second != "no")
    {
        throw Test::RetryException(__FILE__, __LINE__);
    }
}
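The retry check above is driven entirely by the per-request context. A hypothetical client-side call (not part of the source; proxy is assumed to be a Test::MyObjectPrx obtained elsewhere) that selects the non-throwing path:

Ice::Context ctx;
ctx["retry"] = "no";

// Without the context entry the servant throws Test::RetryException from the
// dispatch; with "retry" set to "no" it only schedules the delayed ice_response().
int sum = proxy->amdAddWithRetry(3, 4, ctx);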
Example #3
void 
MyObjectI::amdBadAdd_async(const Test::AMD_MyObject_amdBadAddPtr& cb, int, int, const Ice::Current&)
{
    class ThreadI : public Thread
    {
    public:
        
        ThreadI(const Test::AMD_MyObject_amdBadAddPtr& cb) :
            _cb(cb)
        {
        }

        void run()
        {
            ThreadControl::sleep(Time::milliSeconds(10));
            Test::InvalidInputException e;
            _cb->ice_exception(e);
        }
    private:
        Test::AMD_MyObject_amdBadAddPtr _cb;
    };

    ThreadPtr thread = new ThreadI(cb);
    thread->start().detach();
}
TEST(ThreadedSerializer, CustomSerializationFunction)
{
  ThreadedSerializer<std::string, CustomSerializationFunction> ser;
  ser.verbose_ = true;
  ThreadPtr thread = ser.launch();  // Run in a different thread.

  string dir = "custom_threaded_serializer_test";
  if(!bfs::exists(dir))
    bfs::create_directory(dir);

  int num = 100;
  for(int i = 0; i < num; ++i) {
    ostringstream oss;
    oss << dir << "/string" << setw(3) << setfill('0') << i;
    string path = oss.str();
    oss.str("");
    oss << "The number is " << i << endl;
    string str = oss.str();
    ser.push(str, path);
  }

  ser.quit(); // Finish serializing everything in the queue, then shut down the thread.
  thread->join();
  cout << "Done." << endl;
}
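The second template argument above is a serialization policy. A hypothetical sketch (not from the source) of the kind of functor CustomSerializationFunction could be, assuming ThreadedSerializer invokes it with one queued object and the path that object was pushed with:

#include <fstream>
#include <string>

struct CustomSerializationFunction
{
  void operator()(const std::string& obj, const std::string& path) const
  {
    std::ofstream file(path.c_str());   // one file per pushed object
    file << obj;
  }
};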
Example #5
ThreadPool::ThreadPool(int size, int sizeMax, int sizeWarn, int listSizeMax, int stackSize) :
    _destroyed(false),
    _listSize(0),
    _procSize(0),
    _listSizeMax(listSizeMax),
    _size(size),
    _sizeMax(sizeMax),
    _sizeWarn(sizeWarn),
    _stackSize(0),
    _running(0),
    _inUse(0),
    _load(1.0),
    _promote(true),
    _waitingNumber(0)
{
    // Clamp the configuration values to sane bounds.
    if(size < 1)
    {
        size = 1;
    }
    if(sizeMax < size)
    {
        sizeMax = size;
    }
    if(sizeWarn > sizeMax)
    {
        sizeWarn = sizeMax;
    }
    if(stackSize < 0)
    {
        stackSize = 16 * 1024 * 1024;
    }

    // The members are const, so the clamped values are written back through const_cast.
    const_cast<int&>(_size) = size;
    const_cast<int&>(_sizeMax) = sizeMax;
    const_cast<int&>(_sizeWarn) = sizeWarn;
    const_cast<size_t&>(_stackSize) = static_cast<size_t>(stackSize);

    // Start the initial set of event handler threads.
    try
    {
        for(int i = 0; i < _size; ++i)
        {
            ThreadPtr thread = new EventHandlerThread(this);
            thread->start(_stackSize);
            _threads.push_back(thread);
            ++_running;
        }
    }
    catch(const Exception& ex)
    {
        // Thread creation failed part-way through: tear down whatever was started.
        destroy();
        joinWithAllThreads();
    }
}
void CPUThreadPoolExecutor::threadRun(ThreadPtr thread) {
  this->threadPoolHook_.registerThread();

  thread->startupBaton.post();
  while (true) {
    auto task = taskQueue_->try_take_for(threadTimeout_);
    // Handle thread stopping, either by task timeout, or
    // by 'poison' task added in join() or stop().
    if (UNLIKELY(!task || task.value().poison)) {
      // Actually remove the thread from the list.
      SharedMutex::WriteHolder w{&threadListLock_};
      if (taskShouldStop(task)) {
        for (auto& o : observers_) {
          o->threadStopped(thread.get());
        }
        threadList_.remove(thread);
        stoppedThreads_.add(thread);
        return;
      } else {
        continue;
      }
    }

    runTask(thread, std::move(task.value()));

    if (UNLIKELY(threadsToStop_ > 0 && !isJoin_)) {
      SharedMutex::WriteHolder w{&threadListLock_};
      if (tryDecrToStop()) {
        threadList_.remove(thread);
        stoppedThreads_.add(thread);
        return;
      }
    }
  }
}
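threadRun() above is folly's per-worker loop; work reaches it through the executor's public interface. A minimal usage sketch, assuming folly's CPUThreadPoolExecutor API (not taken from the source):

#include <folly/executors/CPUThreadPoolExecutor.h>

int main()
{
  folly::CPUThreadPoolExecutor pool(4);   // four workers, each running threadRun()
  for (int i = 0; i < 100; ++i)
  {
    pool.add([i] { /* process item i */ });
  }
  pool.join();                            // drain the queue, then stop the workers
  return 0;
}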
Example #7
WebEventServiceImpl::WebEventServiceImpl(Poco::OSP::BundleContext::Ptr pContext, int maxSockets):
    _pContext(pContext),
    _maxSockets(maxSockets),
    _mainRunnable(*this, &WebEventServiceImpl::runMain),
    _workerRunnable(*this, &WebEventServiceImpl::runWorker),
    _stopped(false)
{
    unsigned workerCount = 2*Poco::Environment::processorCount();
    for (unsigned i = 0; i < workerCount; i++)
    {
        ThreadPtr pThread = new Poco::Thread;
        pThread->start(_workerRunnable);
        _workerThreads.push_back(pThread);
    }
    _mainThread.start(_mainRunnable);
}
Example #8
static void* startHook(void* arg)
{
    ThreadPtr thread;
    try
    {
        Thread* rawThread = static_cast<Thread*>(arg);
        thread = rawThread;
        rawThread->__decRef();
        thread->run();
    }
    catch(...)
    {
        std::terminate();
    }
    thread->_done();
    return 0;
}
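startHook() only makes sense together with a start() that hands ownership to the new thread. A simplified sketch of such a start(), assuming the usual reference-counted Thread shown above (the __incRef() here is what the hook's __decRef() balances; names and error handling are illustrative, not taken from the source):

ThreadControl
Thread::start(size_t stackSize)
{
    __incRef();                         // keep *this alive until startHook() adopts it

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    if(stackSize > 0)
    {
        pthread_attr_setstacksize(&attr, stackSize);
    }

    pthread_t id;
    int rc = pthread_create(&id, &attr, startHook, this);
    pthread_attr_destroy(&attr);
    if(rc != 0)
    {
        __decRef();                     // creation failed, drop the extra reference
        throw ThreadSyscallException(__FILE__, __LINE__, rc);
    }
    return ThreadControl(id);
}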
TEST(ThreadedSerializer, SerializableObject)
{
  ThreadedSerializer<NDC> ser;
  ser.verbose_ = true;
  ThreadPtr thread = ser.launch();  // Run in a different thread.

  string dir = "threaded_serializer_test";
  if(!bfs::exists(dir))
    bfs::create_directory(dir);

  int num = 100;
  for(int i = 0; i < num; ++i) {
    ostringstream oss;
    oss << dir << "/ndc" << setw(3) << setfill('0') << i;
    ser.push(NDC(i), oss.str());
  }

  ser.quit(); // Finish serializing everything in the queue, then shut down the thread.
  thread->join();
  cout << "Done." << endl;
}
Example #10
    void TimerMonitor::waitForShutdown()
    {
      ThreadPtr thread;
      {
        AutoRecursiveLock lock(mLock);
        thread = mThread;

        mShouldShutdown = true;
        wakeUp();
      }

      if (!thread)
        return;

      thread->join();

      {
        AutoRecursiveLock lock(mLock);
        mThread.reset();
      }
    }
Example #11
void ThreadPool::promoteFollower(pthread_t thid)
{
    if(_sizeMax > 1)
    {
        this->lock();
        assert(!_promote);
        _promote = true;
        this->notify();                 // wake up a waiting follower thread

        if(!_destroyed)
        {
            assert(_inUse >= 0);
            ++_inUse;

            if(_inUse == _sizeWarn)
            {
                // sizeWarn threshold reached; this variant takes no action here.
            }

            assert(_inUse <= _running);

            // Grow the pool if every running thread is in use and we are still below sizeMax.
            if(_inUse < _sizeMax && _inUse == _running)
            {
                try
                {
                    ThreadPtr thread = new EventHandlerThread(this);
                    thread->start(_stackSize);
                    _threads.push_back(thread);
                    ++_running;
                }
                catch(const Exception& ex)
                {
                    throw ThreadCreateException(__FILE__, __LINE__);
                }
            }
        }
        this->unlock();
    }
}
Example #12
void 
MyObjectI::amdBadSystemAdd_async(const Test::AMD_MyObject_amdBadSystemAddPtr& cb, int, int, const Ice::Current&)
{
    class ThreadI : public Thread
    {
    public:
        
        ThreadI(const Test::AMD_MyObject_amdBadSystemAddPtr& cb) :
            _cb(cb)
        {
        }

        void run()
        {
            ThreadControl::sleep(Time::milliSeconds(10));
            _cb->ice_exception(Ice::InitializationException(__FILE__, __LINE__, "just testing"));
        }
    private:
        Test::AMD_MyObject_amdBadSystemAddPtr _cb;
    };

    ThreadPtr thread = new ThreadI(cb);
    thread->start().detach();
}
Example #13
        SignalDispatcher(const Args&... args)
            : _signal_set(_io_service, SIGINT, SIGTERM)
            , _thread(new std::thread(std::bind((IoServiceRunFunc)&IoService::run, &_io_service)))
        {
            std::size_t size = sizeof...(args);
            StopHandle res[sizeof...(args)] = {args...};

            auto stop_func = [size, res] () {
                for (std::uint32_t i = 0; i < size; ++i) {
                    res[i]();
                }
                LOG(INFO) << "Stop modules.";
            };
            _signal_set.async_wait(std::bind(stop_func));
            _thread->join();
        }
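A hypothetical way to instantiate this dispatcher (not from the source; the arguments are assumed to be StopHandle-compatible callables invoked when SIGINT or SIGTERM arrives):

auto stop_network = [] { LOG(INFO) << "network module stopped."; };  // hypothetical stop callbacks
auto stop_storage = [] { LOG(INFO) << "storage module stopped."; };

// The constructor joins the io_service thread, so this statement returns only
// after a termination signal has been delivered and both handles have run.
SignalDispatcher dispatcher(stop_network, stop_storage);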
Example #14
void
PriorityInversionTest::run()
{
    int cores, high, medium, low, timeout;
    timeout = 30;
#ifdef _WIN32
    return; // Priority inversion is not supported on WIN32.
#else
    try
    {
        IceUtil::Mutex m;
    }
    catch(const IceUtil::ThreadSyscallException&)
    {
        return; // Mutex protocol PrioInherit not supported
    }
    cores = static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
    high = 45;
    medium = 35;
    low = 1;
#endif

    {
        Monitor<Mutex> monitor;
        TaskCollectorPtr collector = new TaskCollector(cores, high, medium, low, monitor);
        vector<ThreadControl> threads;
                
        SharedResourcePtr shared = new SharedResourceMutex(collector);

        //
        // Create one low priority thread.
        //
        TaskPtr lowThread = new Task(shared);
        threads.push_back(lowThread->start(128, low));
        lowThread->waitAcquired();

        //
        // Create one high priority thread that uses the same shared resource
        // as the previous low priority thread.
        //
        TaskPtr highThread = new Task(shared);
        threads.push_back(highThread->start(128, high));

        //
        // Create one medium priority thread per core.
        //
        for(int cont = 0; cont < cores; ++cont)
        {
            ThreadPtr t = new MediumPriorityThread(collector, highThread, timeout);
            threads.push_back(t->start(128, medium));
        }

        //
        // Join with all the threads.
        //
        vector<ThreadControl>::iterator it;
        for(it = threads.begin(); it != threads.end(); ++it)
        {
            try
            {
                (*it).join();
            }
            catch(...)
            {
            }
        }
    }

    //
    // Same test with a recursive mutex.
    //
    {
        Monitor<Mutex> monitor;
        TaskCollectorPtr collector = new TaskCollector(cores, high, medium, low, monitor);

        SharedResourcePtr shared = new SharedResourceRecMutex(collector);

        vector<ThreadControl> threads;

        //
        // Create one low priority thread.
        //
        TaskPtr lowThread = new Task(shared);
        threads.push_back(lowThread->start(128, low));
        lowThread->waitAcquired();

        //
        // Create one high priority thread that uses the same shared resource
        // as the previous low priority thread.
        //
        ThreadPtr highThread = new Task(shared);
        threads.push_back(highThread->start(128, high));

        //
        // Create one medium priority task per core that runs until
        // the high priority thread is running.
        //
        for(int cont = 0; cont < cores; ++cont)
        {
            ThreadPtr t = new MediumPriorityThread(collector, highThread, timeout);
            threads.push_back(t->start(128, medium));
        }

        //
        // Join with all the threads.
        //
        vector<ThreadControl>::iterator it;
        for(it = threads.begin(); it != threads.end(); ++it)
        {
            try
            {
                (*it).join();
            }
            catch(...)
            {
            }
        }
    }
}
Example #15
	void RemoveFromPool(const ThreadPtr& threadPtr)
	{
		m_Pool.remove_thread(threadPtr.get());
	}
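A matching registration helper, sketched under the assumption that m_Pool is a boost::thread_group and that the ThreadPtr retains ownership of the boost::thread object (thread_group's destructor deletes every thread it still holds, so anything added here must be removed again via RemoveFromPool() before destruction):

	void AddToPool(const ThreadPtr& threadPtr)
	{
		// add_thread() stores only the raw pointer; ownership stays with the ThreadPtr.
		m_Pool.add_thread(threadPtr.get());
	}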
Example #16
static void _start(int thread)
{
	monitor m;
	m.count = thread;
	m.sleep = 0;
	m.m = new SNMonitor*[thread];
	for (int i = 0; i < thread; i++) {
		m.m[i] = new SNMonitor();
	}

	g_monitorTherad = ThreadPtr(new std::thread(std::bind(_monitor, &m)));
	g_timerTherad = ThreadPtr(new std::thread(std::bind(_timer, &m)));
	g_socketTherad = ThreadPtr(new std::thread(std::bind(_socket, &m)));

	static int weight[] = {
		-1, -1, -1, -1, 0, 0, 0, 0,
		1, 1, 1, 1, 1, 1, 1, 1,
		2, 2, 2, 2, 2, 2, 2, 2,
		3, 3, 3, 3, 3, 3, 3, 3, };

	worker_parm *wp = new worker_parm[thread];
	for (int i = 0; i < thread; i++) {
		wp[i].m = &m;
		wp[i].id = i;
		if (i < sizeof(weight) / sizeof(weight[0])) {
			wp[i].weight = weight[i];
		}
		else {
			wp[i].weight = 0;
		}
		ThreadPtr pWork(new std::thread(std::bind(_worker, &wp[i])));
		g_threads.push_back(pWork);
	}
	
	//////////////////////////////////////////////////////////////////////////
	// TODO: handle the shutdown steps; the exit order here matters, and the exact behavior still needs testing.

	// 1: wait for the socket thread to exit first
	g_socketTherad->join();
	LogInfo("Socket Thread Exit\n");

	// 2: wait for the timer and monitor threads to exit
	g_timerTherad->join();
	LogInfo("Timer Thread Exit\n");

	g_monitorTherad->join();
	LogInfo("Monitor Thread Exit\n");

	// 3: wait for the worker threads to finish dispatching every queued message before exiting
	while (m.sleep != (int)g_threads.size()) {
		LogInfo("Wait DispatchMessageQueue\n");
		std::this_thread::sleep_for(std::chrono::milliseconds(1000));
	}
	m.cond.notify_all();
	for (auto it = g_threads.begin(); it != g_threads.end(); ++it) {
		(*it)->join();
	}
	LogInfo("Worket Thread Group Exit\n");

	// 4: release all data
	SNServer::Get()->Release();

	delete[] wp;
	for (int i = 0; i < m.count; ++i) {
		delete m.m[i];
	}
	delete[] m.m;
}