/* main thread **************************************************************/

/*
 * Event handler for pool->ev, run on the thread that owns the pool.
 * Dispatches completion callbacks for work items that worker threads
 * have moved onto pool->work_done, and — if the pool is shutting down —
 * finalizes teardown once no worker threads and no pending completions
 * remain.
 */
static void iv_work_event(void *_pool)
{
	struct work_pool_priv *pool = _pool;
	struct iv_list_head items;

	/*
	 * Steal the entire done-list in one short critical section so
	 * the completion callbacks below run without the pool lock held.
	 */
	mutex_lock(&pool->lock);
	__iv_list_steal_elements(&pool->work_done, &items);
	mutex_unlock(&pool->lock);

	while (!iv_list_empty(&items)) {
		struct iv_work_item *work;

		work = iv_container_of(items.next, struct iv_work_item, list);
		iv_list_del(&work->list);

		work->completion(work->cookie);
	}

	if (pool->shutting_down) {
		/*
		 * Re-check the teardown condition under the lock: the
		 * pool may only be freed once every worker thread has
		 * exited and no further completions are queued.
		 */
		mutex_lock(&pool->lock);
		if (!pool->started_threads &&
		    iv_list_empty(&pool->work_done)) {
			mutex_unlock(&pool->lock);
			mutex_destroy(&pool->lock);
			iv_event_unregister(&pool->ev);
			free(pool);
			return;
		}
		mutex_unlock(&pool->lock);
	}
}
/*
 * Event handler for thr->kick, run on a worker thread: pull work items
 * off the pool's pending list and execute them, then either park the
 * thread on the idle list (with a 10-second idle timeout) or let it die
 * if the pool is shutting down.
 */
static void iv_work_thread_got_event(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;
	uint32_t last_seq;

	mutex_lock(&pool->lock);

	thr->kicked = 0;

	/* If we were parked as idle, leave the idle list and cancel
	 * the idle-expiry timer before taking on work. */
	if (!iv_list_empty(&thr->list)) {
		iv_list_del_init(&thr->list);
		iv_timer_unregister(&thr->idle_timer);
	}

	/*
	 * Only claim work items that existed when we started (up to the
	 * tail sequence number sampled here); the signed-difference
	 * comparison below is wraparound-safe.
	 */
	last_seq = pool->seq_tail;
	while ((int32_t)(last_seq - pool->seq_head) > 0) {
		struct iv_work_item *work;

		pool->seq_head++;
		work = iv_container_of(pool->work_items.next,
				       struct iv_work_item, list);
		iv_list_del(&work->list);

		/* Run the work function without the pool lock held. */
		mutex_unlock(&pool->lock);
		work->work(work->cookie);
		iv_invalidate_now();
		mutex_lock(&pool->lock);

		/* Only post the completion event on the empty->nonempty
		 * transition of work_done; later additions are batched. */
		if (iv_list_empty(&pool->work_done))
			iv_event_post(&pool->ev);
		iv_list_add_tail(&work->list, &pool->work_done);
	}

	if (pool->seq_head == pool->seq_tail) {
		if (!pool->shutting_down) {
			/* No work left: park as idle for up to 10 seconds. */
			iv_list_add(&thr->list, &pool->idle_threads);
			iv_validate_now();
			thr->idle_timer.expires = iv_now;
			thr->idle_timer.expires.tv_sec += 10;
			iv_timer_register(&thr->idle_timer);
		} else {
			__iv_work_thread_die(thr);
		}
	} else {
		/*
		 * If we're already at the maximum number of pool
		 * threads, and none of those threads were idle when
		 * more work arrived, then there may have been no
		 * kick sent for the new work item(s) (and no new
		 * pool thread started either), so if we're leaving
		 * with work items still pending, make sure we get
		 * called again, so that we don't deadlock.
		 */
		iv_event_post(&thr->kick);
	}

	mutex_unlock(&pool->lock);
}
/*
 * Poll for ready file descriptors and run their band handlers.
 * For each active fd, handlers are invoked in err -> in -> out order.
 *
 * st->handled_fd is set to the fd before its handlers run; the in/out
 * handlers are only called while it is still non-NULL — presumably it
 * is cleared elsewhere when a handler unregisters the fd, so that the
 * remaining handlers for a now-dead fd are skipped (NOTE(review):
 * confirm against the unregister path).
 */
void iv_fd_poll_and_run(struct iv_state *st, struct timespec *to)
{
	struct iv_list_head active;

	__iv_invalidate_now(st);

	INIT_IV_LIST_HEAD(&active);
	method->poll(st, &active, to);

	while (!iv_list_empty(&active)) {
		struct iv_fd_ *fd;

		fd = iv_list_entry(active.next, struct iv_fd_, list_active);
		iv_list_del_init(&fd->list_active);

		st->handled_fd = fd;

		/* handled_fd was just set, so the error handler needs
		 * no liveness check. */
		if (fd->ready_bands & MASKERR)
			if (fd->handler_err != NULL)
				fd->handler_err(fd->cookie);

		if (st->handled_fd != NULL && fd->ready_bands & MASKIN)
			if (fd->handler_in != NULL)
				fd->handler_in(fd->cookie);

		if (st->handled_fd != NULL && fd->ready_bands & MASKOUT)
			if (fd->handler_out != NULL)
				fd->handler_out(fd->cookie);
	}
}
/*
 * Mark the given bands ready on an fd, queueing the fd onto the active
 * list if it isn't there yet.  Bands accumulate across repeated calls
 * while the fd stays queued; ready_bands is reset only when the fd is
 * first added.
 */
void iv_fd_make_ready(struct iv_list_head *active, struct iv_fd_ *fd, int bands)
{
	int already_queued = !iv_list_empty(&fd->list_active);

	if (!already_queued) {
		fd->ready_bands = 0;
		iv_list_add_tail(&fd->list_active, active);
	}

	fd->ready_bands |= bands;
}
static void iv_fd_port_upload(struct iv_state *st) { while (!iv_list_empty(&st->u.port.notify)) { struct iv_fd_ *fd; fd = iv_list_entry(st->u.port.notify.next, struct iv_fd_, list_notify); iv_fd_port_upload_one(st, fd); } }
/*
 * Remove a previously registered task from the current thread's task
 * list.  It is a fatal error to unregister a task that is not on a
 * list.
 */
void iv_task_unregister(struct iv_task *_t)
{
	struct iv_task_ *t = (struct iv_task_ *)_t;
	struct iv_state *st = iv_get_state();

	if (iv_list_empty(&t->list))
		iv_fatal("iv_task_unregister: called with task not on a list");

	iv_list_del_init(&t->list);
	st->numobjs--;
}
/*
 * Queue a task for execution on the current thread's task list.  It is
 * a fatal error to register a task that is already on a list.
 */
void iv_task_register(struct iv_task *_t)
{
	struct iv_task_ *t = (struct iv_task_ *)_t;
	struct iv_state *st = iv_get_state();

	if (!iv_list_empty(&t->list))
		iv_fatal("iv_task_register: called with task still on a list");

	iv_list_add_tail(&t->list, &st->tasks);
	st->numobjs++;
}
void iv_run_tasks(struct iv_state *st) { struct iv_list_head tasks; __iv_list_steal_elements(&st->tasks, &tasks); while (!iv_list_empty(&tasks)) { struct iv_task_ *t; t = iv_list_entry(tasks.next, struct iv_task_, list); iv_list_del_init(&t->list); st->numobjs--; t->handler(t->cookie); } }
static void log_queue_fifo_free_queue(struct iv_list_head *q) { while (!iv_list_empty(q)) { LogMessageQueueNode *node; LogPathOptions path_options = LOG_PATH_OPTIONS_INIT; LogMessage *msg; node = iv_list_entry(q->next, LogMessageQueueNode, list); iv_list_del(&node->list); path_options.ack_needed = node->ack_needed; msg = node->msg; log_msg_free_queue_node(node); log_msg_ack(msg, &path_options); log_msg_unref(msg); } }
static void iv_fd_kqueue_upload(struct iv_state *st, struct kevent *kev, int size, int *num) { *num = 0; while (!iv_list_empty(&st->u.kqueue.notify)) { struct iv_fd_ *fd; if (*num > size - 2) { kevent_retry("iv_fd_kqueue_upload", st, kev, *num); *num = 0; } fd = iv_list_entry(st->u.kqueue.notify.next, struct iv_fd_, list_notify); iv_fd_kqueue_queue_one(kev, num, fd); fd->registered_bands = fd->wanted_bands; } }
/* worker thread ************************************************************/

/*
 * Tear down a worker thread's per-thread state.  Must only be called
 * with the thread off the idle list and with no kick pending (both are
 * fatal errors otherwise).
 */
static void __iv_work_thread_die(struct work_pool_thread *thr)
{
	/* Capture the pool pointer before thr is freed below. */
	struct work_pool_priv *pool = thr->pool;

	if (thr->kicked)
		iv_fatal("__iv_work_thread_die: called on kicked thread");

	if (!iv_list_empty(&thr->list))
		iv_fatal("__iv_work_thread_die: thread still on list");

	iv_event_unregister(&thr->kick);
	free(thr);

	pool->started_threads--;

	if (pool->thread_stop != NULL)
		pool->thread_stop(pool->cookie);

	/* If this was the last thread of a shutting-down pool, wake the
	 * main thread so it can finalize teardown (see iv_work_event). */
	if (pool->shutting_down && !pool->started_threads)
		iv_event_post(&pool->ev);
}
/*
 * Unregister an fd from the kqueue backend: if it still has a pending
 * notify entry, force a full upload so the kernel state is flushed
 * before the fd goes away.
 */
static void iv_fd_kqueue_unregister_fd(struct iv_state *st, struct iv_fd_ *fd)
{
	if (iv_list_empty(&fd->list_notify))
		return;

	iv_fd_kqueue_upload_all(st);
}
/*
 * Unregister an fd from the event-port backend: if it still has a
 * pending notify entry, flush that single fd's state to the kernel
 * before the fd goes away.
 */
static void iv_fd_port_unregister_fd(struct iv_state *st, struct iv_fd_ *fd)
{
	if (iv_list_empty(&fd->list_notify))
		return;

	iv_fd_port_upload_one(st, fd);
}
/*
 * Report whether a task is currently registered (i.e. on a task list).
 * Returns 1 if registered, 0 otherwise.
 */
int iv_task_registered(struct iv_task *_t)
{
	struct iv_task_ *t = (struct iv_task_ *)_t;

	if (iv_list_empty(&t->list))
		return 0;

	return 1;
}
/*
 * Report whether any tasks are queued on this thread's task list.
 * Returns 1 if at least one task is pending, 0 otherwise.
 */
int iv_pending_tasks(struct iv_state *st)
{
	return iv_list_empty(&st->tasks) ? 0 : 1;
}