int main(int argc, char *argv[]) { ThreadPool threadpool(10); std::thread thread([&threadpool] { for(int i = 0; i < 10; i++) { threadpool.AddTask([]() { int sum = 0; std::cout << "ID:" << std::this_thread::get_id() << " 执行任务" << std::endl; for(int i = 0; i < 1000000; i++) { sum += i; } }); } }); std::this_thread::sleep_for(std::chrono::seconds(2)); threadpool.Stop(); thread.join(); return EXIT_SUCCESS; }
int main(void) { #if 1 std::string path = "./conf.dat"; MyConf mcf(path); struct sockaddr_in address; MySocket msock(mcf); //std::cout<<"after MySocket msock(mcf)"<<std::endl; char recv_buf[1024]; ThreadPool threadpool(4, 10); threadpool.start(); while(1) { memset(recv_buf, 0, sizeof(recv_buf)); msock.recv_info(recv_buf, sizeof(recv_buf)); std::cout<<"the data : "<<recv_buf<<std::endl; std::string recv_buf_str(recv_buf); std::vector<std::string> recv_vec = MySplit.wordSplit(recv_buf_str); //MyTask* pTask = new MyTask(mcf, msock.getAddr(), recv_buf_str); MyTask* pTask = new MyTask(mcf, msock.getAddr(), recv_vec); threadpool.addTask(pTask); } #endif return 0; }
// Demo driver: submits the SAME task object to a 4x10 thread pool once per
// second, forever.
int main(void) {
    // NOTE(review): the same pointer is enqueued repeatedly, so the pool must
    // NOT take ownership / delete tasks after running them — confirm
    // wd::ThreadPool's ownership semantics.
    wd::Task* pTask = new wd::MyTask;
    wd::ThreadPool threadpool(4, 10);
    threadpool.start();
    while(1) {
        threadpool.addTask(pTask);
        sleep(1);
    }
    // Unreachable: the loop above never exits, so the task is never freed and
    // the pool is never stopped.
    delete pTask;
    return 0;
}
int main(int argc,char**argv) { CTestTask*p=NULL; CMyThreadPool threadpool(10); for(int i=0;i<100;i++) { p=new CTestTask(i); threadpool.addTask(p,PRIORITY::NORMAL); } p=new CTestTask(102200); threadpool.addTask(p,PRIORITY::HIGH); //threadpool.destroyThreadPool(); //主线程执行其他工作。 { Sleep(1000*1000); } return 0; }
// Start a 20-thread pool (growable to 30) running mythreadproc over shared
// CommonData, then read stdin lines until "exit", EOF, or the monitor flag.
int main()
{
    CommonData data;
    AThreadPool threadpool(mythreadproc, 20, NULL, &data, callbackThreadPoolMonitor);
    threadpool.setTotalThreadCreationCount(30);
    threadpool.start();

    AFile_IOStream io;
    AString str;
    for (;;)
    {
        // Same exit conditions and evaluation order as the original
        // while-loop: monitor flag first, then a (blocking) readLine.
        if (g_MonitorExited)
            break;
        if (AConstant::npos == io.readLine(str))
            break;
        if (str.equalsNoCase("exit"))
            break;
        AThread::sleep(1000);
    }

    threadpool.stop();
    return 0;
}
int main() { Conf::init_instance( CONF_FILE ); my_daemon(); Logger::init_instance(); Split::init_instance(); PageDB::init_instance(); InvertIdx::init_instance(); int start = clock(); initiation(); printf("%.3lf second\nInitialization completed\n", double( clock()-start)/ CLOCKS_PER_SEC); Conf cf = Conf::get_instance(); size_t queuesize = strtoul( cf["queuesize"].c_str() ,NULL, 0); size_t threadsnum = strtoul( cf["threadsnum"].c_str(), NULL, 0 ); ThreadPool threadpool( queuesize, threadsnum ); threadpool.start(); unsigned short port = atoi( cf["port"].c_str() ); EpollPoller epoller( cf["ip"], port, threadpool ); epoller.loop(); epoller.unloop(); threadpool.stop(); InvertIdx::shut_down(); PageDB::shut_down(); Split::shut_down(); Logger::shut_down(); return 0; }
// Raytrace the image one scanline per pool task, then consume the futures in
// row order, drawing each completed scanline as it becomes available.
void Scene::raytraceImage(Camera *cam, Image *img)
{
    boost::timer::auto_cpu_timer t;
    boost::threadpool::pool threadpool(7);//nCpus() * 2);

    typedef boost::packaged_task<std::vector<Vector3 *> > LineTask;
    typedef boost::unique_future<std::vector<Vector3 *> > LineFuture;
    std::vector<LineTask *> tasks;
    std::vector<LineFuture *> lines;

    // Schedule one task per scanline; the future index matches the row.
    for (int j = 0; j < img->height(); ++j)
    {
        tasks.push_back(new LineTask(boost::bind(&Scene::traceLine, this, cam, img, j)));
        lines.push_back(new LineFuture(tasks.back()->get_future()));
        boost::threadpool::schedule(threadpool,
                                    boost::bind(&LineTask::operator(), tasks.back()));
    }

    printf("Rendering Progress: %.3f%%\r", 0.f);
    for (int j = 0; j < img->height(); ++j)
    {
        // FIX: fetch the future's value once per row instead of calling
        // get() for every pixel (get() blocks; repeated calls on a
        // unique_future are wasteful at best).
        const std::vector<Vector3 *> &line = lines[j]->get();
        for (int i = 0; i < img->width(); ++i)
        {
            if (line[i])
            {
                img->setPixel(i, j, *line[i]);
                delete line[i];
            }
        }
        img->drawScanline(j);
        if (j + 1 == img->height() || !lines[j + 1]->has_value())
            glFinish();
        printf("Rendering Progress: %.3f%%\r", j / float(img->height()) * 100.0f);
        fflush(stdout);
    }

    // FIX: the per-line tasks/futures were heap-allocated and never freed.
    for (size_t k = 0; k < tasks.size(); ++k)
    {
        delete lines[k];
        delete tasks[k];
    }

    // FIX: "100.000%\n" contained a bare '%' followed by '\n', which is an
    // invalid printf conversion specification — escape it as "%%".
    printf("Rendering Progress: 100.000%%\n");
    debug("done Raytracing!\n");
}
int main(void) { #if 1 std::string path = "./conf.dat"; MyConf mcf(path); struct sockaddr_in address; MySocket msock(mcf); //std::cout<<"after MySocket msock(mcf)"<<std::endl; char recv_buf[1024]; ThreadPool threadpool(4, 10); threadpool.start(); MySplit mysplit; while(1) { memset(recv_buf, 0, sizeof(recv_buf)); msock.recv_info(recv_buf, sizeof(recv_buf)); std::cout<<"the data : "<<recv_buf<<std::endl; std::vector<std::string> recv_vec; recv_vec = mysplit.wordSplit(recv_buf); //std::stringstream ss(recv_buf_str); //std::string word; std::cout<<"recv_vec.size="<<recv_vec.size()<<std::endl; for(auto it: recv_vec) { std::cout<<it<<", "; recv_vec.push_back(it); } std::cout<<std::endl; //MyTask* pTask = new MyTask(mcf, msock.getAddr(), recv_buf_str); MyTask* pTask = new MyTask(mcf, msock.getAddr(), recv_vec); threadpool.addTask(pTask); } #endif return 0; }
int main(int argc, char** argv) { // typedef int value_type; // typedef const value_type* const_pointer; // typedef const value_type& const_reference; // typedef const value_type* const_iterator; // // //CTimeManager* pTime = CTimeManager::CreateInst(); // //CPerformance* pPerformance = CPerformance::CreateInst(); // // //srand(time(NULL)); // // //PERFOR_TIMER_BEFORE(vector); // //std::vector<int> tvec; // //const int nCountMax = 10000; // //for (int i = 0; i < nCountMax; ++ i) // //{ // // int nRand= rand(); // // tvec.push_back(nRand); // //} // // //for (int i = 0; i < nCountMax; ++ i) // //{ // // int nRand= rand(); // // for (int j = 0; j < tvec.size(); ++ j) // // { // // if (nRand == tvec[j]) // // { // // break; // // } // // } // //} // //PERFOR_TIMER_AFTER(vector); // // //PERFOR_TIMER_BEFORE(hash_map); // //stdext::hash_map<int, int> tMap; // //for (int i = 0; i < nCountMax; ++ i) // //{ // // int nRand= rand(); // // tMap[nRand] = i; // //} // // //for (int i = 0; i < nCountMax; ++ i) // //{ // // int nRand= rand(); // // int j = tMap[nRand]; // //} // //PERFOR_TIMER_AFTER(hash_map); // //CPerformance::Inst()->PrintResult(); // //CTimeManager::DestroyInst(); // //CPerformance::DestroyInst(); // //int nMax = rand()%7; // // //bool bPerNodePod = std::is_pod<CPerforNode>::value; // //bool bTestPod = std::is_pod<CTestPod>::value; // //logdebugfunc(); // //filedebugfunc(); // //timedebugfunc(); // //i18ndebugfunc(); // //std::vector<int> dd; // //dd.push_back(1); // //dd.push_back(2); // //dd.push_back(3); // //for (auto &i:dd) // //{ // // Print("%d\n", i); // //} // // //testBitSet(); // //testtypetrait(); // // testarray(); // testList(); // testSlist(); // std::vector<int> arr; // arr.assign(3,3); // // std::list<int> list(3,0); // int size = sizeof(CSizeA); // int size2 = sizeof(CSizeB); // // testHashMap(); // testVector(); // testCriticalSection(); CLogManager* pLogManger = CLogManager::CreateInst(); CLog mProDebugLog; 
CLogManager::Inst()->AddDebugLog(&mProDebugLog, "pro"); CStdDisplayer tProPlayer; mProDebugLog.AddDisplayer(&tProPlayer); const char* pProLogName = "pro.log"; CRollFileDisplayer tProFileDisplayer(const_cast<char*>(pProLogName), 1024000, 10); mProDebugLog.AddDisplayer(&tProFileDisplayer); LOG_DEBUG("pro", "%s", "Pro log message is here!\n"); Myth::CThreadPool threadpool(4); CJob tJob1; tJob1.mNum = 1; threadpool.pushBackJob(&tJob1); CJob tJob2; tJob2.mNum = 2; threadpool.pushBackJob(&tJob2); CJob tJob3; tJob3.mNum = 3; threadpool.pushBackJob(&tJob3); CJob tJob4; tJob4.mNum = 4; threadpool.pushBackJob(&tJob4); CJob tJob5; tJob5.mNum = 5; threadpool.pushBackJob(&tJob5); CJob tJob6; tJob6.mNum = 6; threadpool.pushBackJob(&tJob6); int i = 0; while (true) { //printf("begin next\n"); cs.lock(); LOG_DEBUG("pro", "begin next\n"); cs.unlock(); threadpool.run(); #ifdef MYTH_OS_WINDOWS Sleep(100); #else struct timespec tv; tv.tv_sec = 5; tv.tv_nsec = 0; nanosleep(&tv, NULL); #endif ++i; if (i > 50) { break; } } CLogManager::DestroyInst(); }
int main(int argc, char *argv[]) { /* MUTEX */ pthread_mutex_init(&mux, NULL); pthread_mutex_init(&active_thread_mux,NULL); /*SIGNALS*/ /*signal(SIGUSR1,treatment);*/ int old_cancel_type; sigset_t set; sigfillset (&set); sigprocmask(SIG_BLOCK, &set, NULL); /* FIFO */ create_fifo(&front_server, &back_server); fifo_count = 0; item_server * item; /* POOL MANAGER */ pthread_t poolman_t; /* SERVADMIN */ pthread_t servadmin_t; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS,&old_cancel_type); pthread_cleanup_push(mainthread_kill,(void*)&poolman_t); /* SOCKETS */ int sockfd, newsockfd, portno; socklen_t clilen; struct sockaddr_in serv_addr, cli_addr; int n; pid_t pid; if (argc < 2) { fprintf(stderr,"ERROR, no port provided\n"); exit(1); } sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) error("ERROR opening socket"); bzero((char *) &serv_addr, sizeof(serv_addr)); portno = atoi(argv[1]); serv_addr.sin_family = AF_INET; serv_addr.sin_addr.s_addr = INADDR_ANY; serv_addr.sin_port = htons(portno); if (bind(sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) { perror("ERROR on binding"); exit(-1); } listen(sockfd,2); /* 1000 means???*/ clilen = sizeof(cli_addr); threadpool(); /* creates threadpool */ mainthread = pthread_self(); pthread_create(&servadmin_t, NULL, servadmin, NULL); pthread_create(&poolman_t, NULL, manager, NULL); sem_init(&sem_fifo_used, 0, 0); sem_init(&sem_fifo_free, 0, MAX_FIFO); while(1){ sem_wait(&sem_fifo_free); newsockfd = accept(sockfd, (struct sockaddr *) &cli_addr, &clilen); if (newsockfd < 0){ perror("ERROR on accept"); } item = (item_server*)malloc(1*sizeof(item_server)); item->socket = newsockfd; item->time = time(NULL); pthread_mutex_lock(&mux); /* Entering Critical FIFO Region*/ queue (&front_server ,&back_server, item); sem_post(&sem_fifo_used); fifo_count++; /* Exiting Critical FIFO Region*/ pthread_mutex_unlock(&mux); } close(sockfd); pthread_cleanup_pop(0); return 0; }
int main(int argc,char* argv[]) { /*****************************************/ //从命令行参数读取配置文件 /*****************************************/ #if 1 if(argc!=2) { perror("args error!"); exit(1); } Configure conf(argv[1]); #endif #if 0 Configure conf("conf.txt"); #endif Index mIndex(conf.getConf("mydict")); cout<<"加载配置成功……"<<endl; /*****************************************/ //初始化线程池 /*****************************************/ Threadpool threadpool(5,4); threadpool.start(); cout<<"线程池创建成功……"<<endl; /*****************************************/ //初始化TCP服务器 /*****************************************/ const string IP =conf.getConf("myip"); uint16_t PORT = atoi(conf.getConf("myport").c_str()); InetAddress serverAddr(IP,PORT); InetAddress clientAddr; Socket socket; int servfd = socket.fd(); socket.ready(serverAddr); char buf[32]; cout<<"服务器启动成功……"<<endl; /*****************************************/ //循环接收请求,并封装成任务,并加入线程池 /*****************************************/ Task *ptask ; cout<<"等待客户端连接"<<endl; while(1) { SocketIO sockIO(servfd); memset(buf,'\0',32); sockIO.readn(buf,32,clientAddr); /****************************** *log */ cout<<"IP:"<<clientAddr.ip()<<endl; cout<<"PORT:"<<clientAddr.port()<<endl; cout<<"------------------"<<endl; std::string str(buf); ptask = new Task(servfd,clientAddr,str,mIndex); threadpool.addTask(ptask); } return 0; }
//This is what the manager thread runs! void CMultiTaskHandler::RunTasks() { auto nbthreads = LibraryWide::getInstance().Data().getNbThreadsToUse(); vector<thread> threadpool(nbthreads); vector<thRunParam> taskSlots(nbthreads); bool hasnotaskrunning = false; //Reset worker state m_stopWorkers = false; //Instantiate threads for( unsigned int i = 0; i < threadpool.size(); ++i ) { taskSlots[i].waitTime = DUR_WORKER_THREAD_WAIT + chrono::nanoseconds(i * 10); threadpool[i] = std::move( thread( &CMultiTaskHandler::WorkerThread, this, std::ref(taskSlots[i]) ) ); } //Check when all tasks are done while( !( m_managerShouldStopAftCurTask.load() && hasnotaskrunning ) ) { //If all tasks are done set "hasnotaskrunning" to true! hasnotaskrunning = all_of( taskSlots.begin(), taskSlots.end(), [](thRunParam& astate){ return !(astate.runningTask); } ); //Trigger the all task finished cond var if the task queue is empty, and no tasks are running if( hasnotaskrunning ) { bool noTasksLeftInQueue = false; try { lock_guard<mutex> mylg( m_mutextasks ); noTasksLeftInQueue = m_tasks.empty(); } catch( exception e ){SimpleHandleException(e);} if( noTasksLeftInQueue ) { try { unique_lock<mutex> taskfinished(m_mutexTaskFinished); m_lastTaskFinished.notify_all(); } catch( exception e ){SimpleHandleException(e);} } } try { this_thread::sleep_for(DUR_MANAGER_THREAD_WAIT); } catch( exception e ){SimpleHandleException(e);} } //Tell the workers to stop m_stopWorkers = true; //Wait for all threads to finish for( unsigned int i = 0; i < threadpool.size(); ++i ) { if( threadpool[i].joinable() ) threadpool[i].join(); } }