/* * create a thread pool, that has thread_num threads. * @param {int} thread_num thread num. * @param {void *} udata user data pointer. * @param {function} func_leader leader function, need the one param. * return need to resume threads num, if less than 0, then exit. * @param {function} func_task task function, need the one param. * return 0 then not task to do. or else is to exit. */ struct cthread_pool *cthread_pool_create(int thread_num, void *udata, int (*func_leader)(void *), int (*func_task)(void *)) { struct cthread_pool *self; assert(thread_num > 0 && func_leader != NULL && func_task != NULL); if (thread_num <= 0 || !func_leader || !func_task) return NULL; self = (struct cthread_pool *)malloc(sizeof(struct cthread_pool)); if (!self) return NULL; cthread_list_init(&self->all_list); catomic_set(&self->run, 0); catomic_set(&self->resume_num, 0); catomic_set(&self->suspend_num, 0); catomic_set(&self->activity_num, 0); catomic_set(&self->need_exit_num, 0); catomic_set(&self->exit_num, 0); catomic_set(&self->has_leader, 0); self->thread_num = thread_num; self->udata = udata; self->func_leader = func_leader; self->func_task = func_task; while (thread_num > 0) { struct cthread_info *cinfo = cthread_info_create(self, th_pro_func); if (!cinfo) goto err_do; cthread_list_push_back(&self->all_list, cinfo); thread_num--; } catomic_set(&self->run, 1); thread_pool_debuglog("func[%s] mgr:%p", __FUNCTION__, self); /* resume all. */ cthread_pool_resume_some_thread(self, self->thread_num, NULL); /* wait thread run. */ while (catomic_read(&self->need_exit_num) != (int64)self->thread_num) { cthread_self_sleep(0); } return self; err_do: if (self) { cthread_list_destroy(&self->all_list); free(self); } return NULL; }
/*
 * Shut down and free a thread pool.
 * Clears the run flag, then repeatedly wakes suspended workers until every
 * thread that started has exited, and finally releases all resources.
 * Safe to call with NULL (no-op).
 */
void cthread_pool_release(struct cthread_pool *self)
{
	if (self == NULL)
		return;

	thread_pool_debuglog("func[%s] mgr:%p", __FUNCTION__, self);

	/* Drop the run flag so workers leave their loops. */
	catomic_set(&self->run, 0);

	/* Keep kicking suspended workers until all started threads have exited. */
	for (;;) {
		if (catomic_read(&self->exit_num) == catomic_read(&self->need_exit_num))
			break;
		cthread_pool_resume_some_thread(self, self->thread_num, NULL);
		/* sleep 1 ms. */
		cthread_self_sleep(1);
	}

	assert(catomic_read(&self->suspend_num) == 0);
	assert(catomic_read(&self->activity_num) == 0);
	assert(catomic_read(&self->need_exit_num) == catomic_read(&self->exit_num));
	assert(catomic_read(&self->need_exit_num) <= self->thread_num);

	cthread_list_destroy(&self->all_list);
	free(self);
}
/* * initialize event manager. * socketer_num --- socket total number. must greater than 1. * thread_num --- thread number, if less than 0, then start by the number of cpu threads */ bool eventmgr_init(int socketer_num, int thread_num) { if (s_mgr || socketer_num < 1) return false; if (thread_num <= 0) { thread_num = get_cpu_num(); } /* in kqueue, must only one thead. */ thread_num = 1; { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_IGN; sigemptyset(&sa.sa_mask); if (sigaction(SIGPIPE, &sa, NULL) == -1) return false; } s_mgr = (struct kqueuemgr *)malloc(sizeof(struct kqueuemgr)); if (!s_mgr) return false; /* initialize. */ catomic_set(&s_mgr->event_num, 0); s_mgr->kqueue_fd = kqueue(); if (s_mgr->kqueue_fd == -1) { free(s_mgr); s_mgr = NULL; return false; } s_mgr->thread_num = thread_num; s_mgr->need_exit = false; /* first building kqueue module, and then create thread pool. */ s_mgr->thread_pool = cthread_pool_create(thread_num, s_mgr, leader_func, task_func); if (!s_mgr->thread_pool) { close(s_mgr->kqueue_fd); free(s_mgr); s_mgr = NULL; return false; } return true; }
/* get need resume thread number. */ static int leader_func(void *argv) { struct kqueuemgr *mgr = (struct kqueuemgr *)argv; /* wait event. */ if (mgr->need_exit) { return -1; } else { struct timespec timeout; timeout.tv_sec = 0; timeout.tv_nsec = 50 * 1000000; int num = kevent(mgr->kqueue_fd, NULL, 0, mgr->ev_array, THREAD_EVENT_SIZE, &timeout); if (num > 0) { catomic_set(&mgr->event_num, num); num = (num + (int)(EVERY_THREAD_PROCESS_EVENT_NUM) - 1) / (int)(EVERY_THREAD_PROCESS_EVENT_NUM); } else if (num < 0) { if (num == -1 && NET_GetLastError() == EINTR) return 0; log_error("kevent return value < 0, error, return value:%d, errno:%d", num, NET_GetLastError()); } return num; } }
/*
 * Worker thread entry for the leader/followers thread pool.
 *
 * Lifecycle: park in an initial suspend until the pool resumes us, register
 * as a running thread, then loop: the current leader calls func_leader to
 * discover work and hands leadership off while waking followers; followers
 * call func_task, and the last active follower elects itself the new leader.
 * Exits the loop when run is cleared or a callback signals exit, then
 * reports the exit via cthread_pool_do_thread_exit.
 *
 * NOTE(review): correctness of the handoff relies on the exact ordering of
 * the catomic operations below -- presumably catomic_dec returns the
 * post-decrement value and catomic_compare_set is a CAS; confirm against
 * the catomic implementation before restructuring.
 */
static void th_pro_func(cthread *th)
{
	int cinfo_id = 0;
	struct cthread_info *cinfo = (struct cthread_info *)cthread_get_udata(th);
	struct cthread_pool *mgr = cinfo->mgr;

	/* first suspend. */
	cthread_suspend(cthread_info_get_handle_ptr(cinfo));

	/* check need run: if the pool never started (creation failed), leave now. */
	if (catomic_read(&mgr->run) == 0)
		return;

	cinfo_id = (int)cthread_info_get_id(cinfo);
	(void)cinfo_id; /* kept for debugging only; silences unused warning. */

	/* register this thread as resumed/active/started. */
	cthread_info_state_to_activity(cinfo);
	catomic_inc(&mgr->resume_num);
	catomic_inc(&mgr->activity_num);
	catomic_inc(&mgr->need_exit_num);

	/* wait all run to here. */
	while (catomic_read(&mgr->need_exit_num) != (int64)mgr->thread_num) {
		cthread_self_sleep(0);
	}

	thread_pool_debuglog("func:[%s][start thread] id:%d", __FUNCTION__,
		cthread_info_get_id(cinfo));

	while (catomic_read(&mgr->run) != 0) {
		if (cthread_info_is_header(cinfo)) {
			/*
			 * do leader function,
			 * if return value less than 0, then exit.
			 * if return value greater than 0, then is need resume thread num.
			 */
			int resume_num = mgr->func_leader(mgr->udata);
			if (resume_num > 0) {
				/* cap the wake-up count at the number of started threads. */
				int real_resume_num = (int)min(resume_num, catomic_read(&mgr->need_exit_num));
				thread_pool_debuglog("func:[%s][leader func return] leader thread id:%d, "
					"resume_num:%d, activity num:%d, exit num:%d, has_leader:%d",
					__FUNCTION__, cthread_info_get_id(cinfo), resume_num,
					(int)catomic_read(&mgr->activity_num),
					(int)catomic_read(&mgr->exit_num),
					(int)catomic_read(&mgr->has_leader));

				/* step down to follower, release leadership, then wake
				 * real_resume_num - 1 others (this thread counts as one). */
				cthread_info_change_to_henchman(cinfo);
				catomic_set(&mgr->has_leader, 0);
				catomic_set(&mgr->resume_num, real_resume_num);
				cthread_pool_resume_some_thread(mgr, real_resume_num - 1, cinfo);
			} else if (resume_num < 0) {
				break;
			}
		} else {
			/*
			 * do task function,
			 * the return value is not equal to 0, then exit.
			 */
			if (mgr->func_task(mgr->udata) != 0)
				break;

			/* If own is the last activity of the followers, set own to leader. */
			if (catomic_dec(&mgr->resume_num) == 0) {
				assert(catomic_read(&mgr->activity_num) >= 1);
				assert(catomic_read(&mgr->has_leader) == 0);
				/* competition leader. */
				if (catomic_compare_set(&mgr->has_leader, 0, 1)) {
					/* change own to leader. */
					cthread_info_change_to_header(cinfo);
					thread_pool_debuglog("func:[%s][change to leader] task thread id:%d, "
						"activity num:%d, exit num:%d, has_leader:%d",
						__FUNCTION__, cthread_info_get_id(cinfo),
						(int)catomic_read(&mgr->activity_num),
						(int)catomic_read(&mgr->exit_num),
						(int)catomic_read(&mgr->has_leader));
					continue;
				}
				/* should be unreachable: dec-to-zero winner must win the CAS.
				 * (the assert vanishes under NDEBUG; log_error still records it.) */
				assert(false && "is last activity thread, but not change to leader, error!");
				log_error("is last activity thread, but not change to leader, error!");
			}

			/* suspend. */
			cthread_info_state_to_suspend(cinfo);
			catomic_dec(&mgr->activity_num);
			catomic_inc(&mgr->suspend_num);
			thread_pool_debuglog("func:[%s][suspend thread] thread id:%d, activity num:%d, "
				"exit num:%d, has_leader:%d",
				__FUNCTION__, cthread_info_get_id(cinfo),
				(int)catomic_read(&mgr->activity_num),
				(int)catomic_read(&mgr->exit_num),
				(int)catomic_read(&mgr->has_leader));

			/* real do suspend. */
			cthread_suspend(cthread_info_get_handle_ptr(cinfo));

			/* from resume. */
			cthread_info_state_to_activity(cinfo);
			catomic_dec(&mgr->suspend_num);
			catomic_inc(&mgr->activity_num);
			thread_pool_debuglog("func:[%s][thread for resume] thread id:%d, activity num:%d, "
				"exit num:%d, has_leader:%d",
				__FUNCTION__, cthread_info_get_id(cinfo),
				(int)catomic_read(&mgr->activity_num),
				(int)catomic_read(&mgr->exit_num),
				(int)catomic_read(&mgr->has_leader));
		}
	}
	cthread_pool_do_thread_exit(cinfo);
}