void iv_fd_poll_and_run(struct iv_state *st, struct timespec *to)
{
	struct iv_list_head active;

	__iv_invalidate_now(st);

	INIT_IV_LIST_HEAD(&active);
	method->poll(st, &active, to);

	while (!iv_list_empty(&active)) {
		struct iv_fd_ *fd;

		fd = iv_list_entry(active.next, struct iv_fd_, list_active);
		iv_list_del_init(&fd->list_active);

		/*
		 * Record which fd is being dispatched.  If one of the
		 * handlers below unregisters this fd, handled_fd is
		 * cleared, and the remaining bands must not be
		 * delivered to a now-dead object.
		 */
		st->handled_fd = fd;

		if (fd->ready_bands & MASKERR)
			if (fd->handler_err != NULL)
				fd->handler_err(fd->cookie);

		if (st->handled_fd != NULL && fd->ready_bands & MASKIN)
			if (fd->handler_in != NULL)
				fd->handler_in(fd->cookie);

		if (st->handled_fd != NULL && fd->ready_bands & MASKOUT)
			if (fd->handler_out != NULL)
				fd->handler_out(fd->cookie);
	}
}
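/*
 * Illustration (not part of the original source): a minimal sketch of
 * how the dispatch loop above is driven through ivykis's public API,
 * assuming <iv.h> is installed.  If handler_in() unregisters its own
 * fd, iv_fd_unregister() clears st->handled_fd, which is why the loop
 * above re-checks it before delivering the remaining bands.
 */
#include <iv.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static struct iv_fd ifd;

static void echo_handler_in(void *cookie)
{
	char buf[128];
	ssize_t len;

	len = read(ifd.fd, buf, sizeof(buf));
	if (len < 0 && errno == EAGAIN)
		return;

	if (len <= 0) {
		/* Unregistering from inside the handler is safe: the
		 * dispatcher notices via st->handled_fd and skips the
		 * remaining band callbacks for this fd. */
		iv_fd_unregister(&ifd);
		iv_quit();
		return;
	}

	(void)write(1, buf, len);
}

int main(void)
{
	iv_init();

	IV_FD_INIT(&ifd);
	ifd.fd = 0;			/* stdin, for demonstration */
	ifd.handler_in = echo_handler_in;
	iv_fd_register(&ifd);

	iv_main();

	iv_deinit();
	return 0;
}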
/*
 * Append the kevent changes needed to bring the kernel's view of this
 * fd (registered_bands) in sync with what we want (wanted_bands).
 */
static void iv_fd_kqueue_queue_one(struct kevent *kev, int *_num, struct iv_fd_ *fd)
{
	int num;
	int wanted;
	int regd;

	iv_list_del_init(&fd->list_notify);

	num = *_num;
	wanted = fd->wanted_bands;
	regd = fd->registered_bands;

	if (!(wanted & MASKIN) && (regd & MASKIN)) {
		EV_SET(&kev[num], fd->fd, EVFILT_READ, EV_DELETE,
		       0, 0, (void *)(intptr_t)fd);
		num++;
	} else if ((wanted & MASKIN) && !(regd & MASKIN)) {
		EV_SET(&kev[num], fd->fd, EVFILT_READ, EV_ADD | EV_ENABLE,
		       0, 0, (void *)(intptr_t)fd);
		num++;
	}

	if (!(wanted & MASKOUT) && (regd & MASKOUT)) {
		EV_SET(&kev[num], fd->fd, EVFILT_WRITE, EV_DELETE,
		       0, 0, (void *)(intptr_t)fd);
		num++;
	} else if ((wanted & MASKOUT) && !(regd & MASKOUT)) {
		EV_SET(&kev[num], fd->fd, EVFILT_WRITE, EV_ADD | EV_ENABLE,
		       0, 0, (void *)(intptr_t)fd);
		num++;
	}

	*_num = num;
}
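/*
 * Illustration (not part of the original source): the change list that
 * iv_fd_kqueue_queue_one() builds is meant to be flushed in a single
 * kevent() call.  A standalone sketch of that batched-changelist
 * pattern, using only the stock <sys/event.h> API (BSD/macOS):
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct kevent chg[2];
	struct kevent ev;
	int kq;
	int n;

	kq = kqueue();
	if (kq < 0) {
		perror("kqueue");
		exit(1);
	}

	/* Batch two changes: watch stdin for read, stdout for write. */
	EV_SET(&chg[0], 0, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
	EV_SET(&chg[1], 1, EVFILT_WRITE, EV_ADD | EV_ENABLE, 0, 0, NULL);

	/* Submit the whole change list in one syscall, then block
	 * until at least one event is ready. */
	n = kevent(kq, chg, 2, &ev, 1, NULL);
	if (n > 0)
		printf("fd %d ready (filter %d)\n", (int)ev.ident, ev.filter);

	close(kq);
	return 0;
}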
static void
log_queue_fifo_rewind_backlog(LogQueue *s, guint rewind_count)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  guint pos;

  if (rewind_count > self->qbacklog_len)
    rewind_count = self->qbacklog_len;

  for (pos = 0; pos < rewind_count; pos++)
    {
      LogMessageQueueNode *node = iv_list_entry(self->qbacklog.prev, LogMessageQueueNode, list);

      /*
       * Messages only get onto the backlog via pop_head(), which takes
       * an extra ack and ref as it pushes the message there, so the
       * rewind must drop that ack and ref again.
       */
      iv_list_del_init(&node->list);
      iv_list_add(&node->list, &self->qoverflow_output);

      self->qbacklog_len--;
      self->qoverflow_output_len++;
      stats_counter_inc(self->super.stored_messages);
    }
}
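/*
 * Illustration (not part of the original source): a standalone toy
 * model of the rewind operation above, built only on ivykis's public
 * <iv_list.h>.  The names (rw_node, rw_output, rw_backlog) are
 * hypothetical.  It shows why rewind walks qbacklog.prev (the newest
 * backlog entry) and re-links each node at the *head* of the output
 * queue: rewound messages end up oldest-first, ahead of anything newer.
 */
#include <iv_list.h>
#include <stdio.h>

struct rw_node {
	int seq;
	struct iv_list_head list;
};

static struct iv_list_head rw_output;
static struct iv_list_head rw_backlog;

static void rw_rewind_backlog(unsigned int rewind_count)
{
	while (rewind_count-- && !iv_list_empty(&rw_backlog)) {
		/* Newest backlog entry first, exactly like
		 * iv_list_entry(self->qbacklog.prev, ...) above. */
		struct rw_node *n =
			iv_list_entry(rw_backlog.prev, struct rw_node, list);

		iv_list_del_init(&n->list);
		iv_list_add(&n->list, &rw_output);	/* head: redeliver first */
	}
}

int main(void)
{
	struct rw_node n1 = { 1 };
	struct rw_node n2 = { 2 };
	struct iv_list_head *lh;

	INIT_IV_LIST_HEAD(&rw_output);
	INIT_IV_LIST_HEAD(&rw_backlog);
	INIT_IV_LIST_HEAD(&n1.list);
	INIT_IV_LIST_HEAD(&n2.list);

	/* Pretend both messages were popped onto the backlog. */
	iv_list_add_tail(&n1.list, &rw_backlog);
	iv_list_add_tail(&n2.list, &rw_backlog);

	rw_rewind_backlog(2);

	/* Prints 1 then 2: original delivery order is preserved. */
	for (lh = rw_output.next; lh != &rw_output; lh = lh->next)
		printf("%d\n", iv_list_entry(lh, struct rw_node, list)->seq);

	return 0;
}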
static void iv_work_thread_got_event(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;
	uint32_t last_seq;

	mutex_lock(&pool->lock);

	thr->kicked = 0;

	if (!iv_list_empty(&thr->list)) {
		iv_list_del_init(&thr->list);
		iv_timer_unregister(&thr->idle_timer);
	}

	/*
	 * The signed difference keeps this comparison correct even
	 * across sequence-counter wraparound.
	 */
	last_seq = pool->seq_tail;
	while ((int32_t)(last_seq - pool->seq_head) > 0) {
		struct iv_work_item *work;

		pool->seq_head++;
		work = iv_container_of(pool->work_items.next,
				       struct iv_work_item, list);
		iv_list_del(&work->list);

		mutex_unlock(&pool->lock);
		work->work(work->cookie);
		iv_invalidate_now();
		mutex_lock(&pool->lock);

		if (iv_list_empty(&pool->work_done))
			iv_event_post(&pool->ev);
		iv_list_add_tail(&work->list, &pool->work_done);
	}

	if (pool->seq_head == pool->seq_tail) {
		if (!pool->shutting_down) {
			iv_list_add(&thr->list, &pool->idle_threads);
			iv_validate_now();
			thr->idle_timer.expires = iv_now;
			thr->idle_timer.expires.tv_sec += 10;
			iv_timer_register(&thr->idle_timer);
		} else {
			__iv_work_thread_die(thr);
		}
	} else {
		/*
		 * If we're already at the maximum number of pool
		 * threads, and none of those threads were idle when
		 * more work arrived, then there may have been no
		 * kick sent for the new work item(s) (and no new
		 * pool thread started either), so if we're leaving
		 * with work items still pending, make sure we get
		 * called again, so that we don't deadlock.
		 */
		iv_event_post(&thr->kick);
	}

	mutex_unlock(&pool->lock);
}
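/*
 * Illustration (not part of the original source): submitting work to
 * the pool that the thread function above services, via ivykis's
 * public <iv_work.h> API.  A minimal sketch, assuming the documented
 * IV_WORK_POOL_INIT/IV_WORK_ITEM_INIT initializers; work() runs on a
 * pool thread, completion() runs back on the submitting thread.
 */
#include <iv.h>
#include <iv_work.h>
#include <stdio.h>
#include <stdlib.h>

static struct iv_work_pool pool;
static struct iv_work_item item;

static void do_work(void *cookie)
{
	printf("heavy work on a pool thread\n");
}

static void work_done(void *cookie)
{
	printf("completion on the submitting thread\n");
	iv_work_pool_put(&pool);
	iv_quit();
}

int main(void)
{
	iv_init();

	IV_WORK_POOL_INIT(&pool);
	pool.max_threads = 4;
	if (iv_work_pool_create(&pool) < 0)
		exit(1);

	IV_WORK_ITEM_INIT(&item);
	item.work = do_work;
	item.completion = work_done;
	iv_work_pool_submit_work(&pool, &item);

	iv_main();

	iv_deinit();
	return 0;
}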
/*
 * Can only run from the output thread.
 *
 * NOTE: this returns a reference which the caller must take care to free.
 */
static LogMessage *
log_queue_fifo_pop_head(LogQueue *s, LogPathOptions *path_options)
{
  LogQueueFifo *self = (LogQueueFifo *) s;
  LogMessageQueueNode *node;
  LogMessage *msg = NULL;

  if (self->qoverflow_output_len == 0)
    {
      /* slow path: the output queue is empty, grab some elements from the wait queue */
      g_static_mutex_lock(&self->super.lock);
      iv_list_splice_tail_init(&self->qoverflow_wait, &self->qoverflow_output);
      self->qoverflow_output_len = self->qoverflow_wait_len;
      self->qoverflow_wait_len = 0;
      g_static_mutex_unlock(&self->super.lock);
    }

  if (self->qoverflow_output_len > 0)
    {
      node = iv_list_entry(self->qoverflow_output.next, LogMessageQueueNode, list);

      msg = node->msg;
      path_options->ack_needed = node->ack_needed;
      self->qoverflow_output_len--;
      if (!self->super.use_backlog)
        {
          iv_list_del(&node->list);
          log_msg_free_queue_node(node);
        }
      else
        {
          iv_list_del_init(&node->list);
        }
    }
  else
    {
      /* no items on either the wait queue or the output queue.
       *
       * NOTE: the input queues may contain items even in this case;
       * however, we don't touch them here, as they'll be migrated to the
       * wait queue once the input threads finish their processing (or
       * the high watermark is reached). Also, they are unlocked, so
       * there is no way to touch them safely.
       */
      return NULL;
    }

  stats_counter_dec(self->super.stored_messages);

  if (self->super.use_backlog)
    {
      log_msg_ref(msg);
      iv_list_add_tail(&node->list, &self->qbacklog);
      self->qbacklog_len++;
    }

  return msg;
}
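/*
 * Illustration (not part of the original source): a standalone toy
 * model of the contract pop_head() establishes in backlog mode, using
 * only ivykis's public <iv_list.h>.  All names (toy_msg, toy_pop_head,
 * toy_ack_backlog) are hypothetical; the integer ref field stands in
 * for log_msg_ref()/log_msg_unref().  Every popped message parks on
 * the backlog with an extra ref until the destination either acks it
 * (drop the ref, leave the queue) or rewinds it (see the rewind sketch
 * above).
 */
#include <iv_list.h>
#include <stdio.h>

struct toy_msg {
	int ref;		/* models the message refcount */
	int seq;
	struct iv_list_head list;
};

static struct iv_list_head toy_output;
static struct iv_list_head toy_backlog;

/* Models pop_head() with use_backlog: unlink from the output queue,
 * take an extra ref, park the node on the backlog tail. */
static struct toy_msg *toy_pop_head(void)
{
	struct toy_msg *m;

	if (iv_list_empty(&toy_output))
		return NULL;

	m = iv_list_entry(toy_output.next, struct toy_msg, list);
	iv_list_del_init(&m->list);

	m->ref++;
	iv_list_add_tail(&m->list, &toy_backlog);

	return m;
}

/* Models ack_backlog(): delivery was confirmed, so the oldest backlog
 * entry drops its extra ref and leaves the queue for good. */
static void toy_ack_backlog(void)
{
	struct toy_msg *m;

	if (iv_list_empty(&toy_backlog))
		return;

	m = iv_list_entry(toy_backlog.next, struct toy_msg, list);
	iv_list_del_init(&m->list);
	m->ref--;
}

int main(void)
{
	struct toy_msg m = { 1, 42 };

	INIT_IV_LIST_HEAD(&toy_output);
	INIT_IV_LIST_HEAD(&toy_backlog);
	INIT_IV_LIST_HEAD(&m.list);
	iv_list_add_tail(&m.list, &toy_output);

	toy_pop_head();		/* deliver... */
	toy_ack_backlog();	/* ...and confirm */

	printf("refs after ack: %d\n", m.ref);	/* back to 1 */
	return 0;
}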
void iv_task_unregister(struct iv_task *_t)
{
	struct iv_state *st = iv_get_state();
	struct iv_task_ *t = (struct iv_task_ *)_t;

	if (iv_list_empty(&t->list))
		iv_fatal("iv_task_unregister: called with task not on a list");

	st->numobjs--;
	iv_list_del_init(&t->list);
}
void iv_run_tasks(struct iv_state *st)
{
	struct iv_list_head tasks;

	/*
	 * Steal the whole task list up front, so that tasks which
	 * register new tasks from their handlers don't get those run
	 * until the next loop iteration.
	 */
	__iv_list_steal_elements(&st->tasks, &tasks);
	while (!iv_list_empty(&tasks)) {
		struct iv_task_ *t;

		t = iv_list_entry(tasks.next, struct iv_task_, list);
		iv_list_del_init(&t->list);
		st->numobjs--;
		t->handler(t->cookie);
	}
}
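/*
 * Illustration (not part of the original source): registering a task
 * through ivykis's public API.  Because iv_run_tasks() steals the list
 * up front, a task that re-registers itself (as below) runs once per
 * event-loop iteration instead of spinning inside a single call.
 */
#include <iv.h>
#include <stdio.h>

static struct iv_task task;
static int rounds;

static void task_handler(void *cookie)
{
	printf("round %d\n", rounds);

	if (++rounds < 3)
		iv_task_register(&task);	/* runs next iteration */
	else
		iv_quit();
}

int main(void)
{
	iv_init();

	IV_TASK_INIT(&task);
	task.handler = task_handler;
	iv_task_register(&task);

	iv_main();

	iv_deinit();
	return 0;
}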
static void iv_work_thread_idle_timeout(void *_thr)
{
	struct work_pool_thread *thr = _thr;
	struct work_pool_priv *pool = thr->pool;

	mutex_lock(&pool->lock);
	if (thr->kicked) {
		thr->idle_timer.expires = iv_now;
		thr->idle_timer.expires.tv_sec += 10;
		iv_timer_register(&thr->idle_timer);
	} else {
		iv_list_del_init(&thr->list);
		__iv_work_thread_die(thr);
	}
	mutex_unlock(&pool->lock);
}
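/*
 * Illustration (not part of the original source): the re-arm idiom the
 * idle timeout above uses (set expires from iv_now, push it into the
 * future, register the timer again), as a standalone three-tick
 * one-second ticker built on ivykis's public timer API.
 */
#include <iv.h>
#include <stdio.h>

static struct iv_timer tick;
static int ticks;

static void tick_handler(void *cookie)
{
	printf("tick %d\n", ++ticks);

	if (ticks >= 3) {
		iv_quit();
		return;
	}

	/* Same pattern as the idle timer: base the new expiry on
	 * iv_now and register the timer object again. */
	iv_validate_now();
	tick.expires = iv_now;
	tick.expires.tv_sec++;
	iv_timer_register(&tick);
}

int main(void)
{
	iv_init();

	IV_TIMER_INIT(&tick);
	tick.handler = tick_handler;
	iv_validate_now();
	tick.expires = iv_now;
	tick.expires.tv_sec++;
	iv_timer_register(&tick);

	iv_main();

	iv_deinit();
	return 0;
}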
static int __iv_fd_port_upload_one(struct iv_state *st, struct iv_fd_ *fd)
{
	int ret;

	iv_list_del_init(&fd->list_notify);

	if (fd->wanted_bands) {
		ret = port_associate(st->u.port.port_fd, PORT_SOURCE_FD,
				     fd->fd,
				     bits_to_poll_mask(fd->wanted_bands), fd);
	} else {
		ret = port_dissociate(st->u.port.port_fd, PORT_SOURCE_FD,
				      fd->fd);
	}

	if (ret == 0)
		fd->registered_bands = fd->wanted_bands;

	return ret;
}
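/*
 * Illustration (not part of the original source): the Solaris event
 * ports calls used above, in standalone form.  Note the property the
 * backend has to compensate for: an association is one-shot, so after
 * port_get() returns an event for an fd, that fd has to be
 * re-associated before it can be polled again.
 */
#include <port.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	port_event_t pe;
	int port;

	port = port_create();
	if (port < 0) {
		perror("port_create");
		exit(1);
	}

	/* Ask for readability on stdin; the user pointer is a cookie. */
	if (port_associate(port, PORT_SOURCE_FD, 0, POLLIN, NULL) < 0) {
		perror("port_associate");
		exit(1);
	}

	/* Blocks until stdin is readable.  Retrieving the event
	 * consumes the association, so polling again would require
	 * another port_associate() call. */
	if (port_get(port, &pe, NULL) == 0)
		printf("events 0x%x on fd %d\n",
		       pe.portev_events, (int)pe.portev_object);

	close(port);
	return 0;
}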
static void iv_fd_kqueue_notify_fd(struct iv_state *st, struct iv_fd_ *fd)
{
	iv_list_del_init(&fd->list_notify);

	if (fd->registered_bands != fd->wanted_bands)
		iv_list_add_tail(&fd->list_notify, &st->u.kqueue.notify);
}
static int iv_fd_port_poll(struct iv_state *st,
			   struct iv_list_head *active,
			   const struct timespec *abs)
{
	struct timespec _rel;
	struct timespec *rel;
	int run_timers;
	int run_events;
	unsigned int nget;
	port_event_t pe[PORTEV_NUM];
	int ret;
	int i;

	iv_fd_port_upload(st);

	rel = to_relative(st, &_rel, abs);

	run_timers = 0;
	if (rel != NULL && rel->tv_sec == 0 && rel->tv_nsec == 0)
		run_timers = 1;

	run_events = 0;

poll_more:
	nget = 1;

	/*
	 * If we get EINTR from port_getn(), no events are returned
	 * and nget will not have been updated, but if we get ETIME,
	 * events may be returned, and nget will be set to the number
	 * of events in the array, and we need to process those
	 * events as usual.
	 */
	ret = port_getn(st->u.port.port_fd, pe, PORTEV_NUM, &nget, rel);

	__iv_invalidate_now(st);

	if (ret < 0 && errno != ETIME) {
		if (errno == EINTR)
			return run_timers;

		iv_fatal("iv_fd_port_poll: got error %d[%s]", errno,
			 strerror(errno));
	}

	if (ret < 0 && errno == ETIME)
		run_timers = 1;

	for (i = 0; i < nget; i++) {
		int source;

		source = pe[i].portev_source;
		if (source == PORT_SOURCE_FD) {
			int revents;
			struct iv_fd_ *fd;

			revents = pe[i].portev_events;
			fd = pe[i].portev_user;

			if (revents & (POLLIN | POLLERR | POLLHUP))
				iv_fd_make_ready(active, fd, MASKIN);

			if (revents & (POLLOUT | POLLERR | POLLHUP))
				iv_fd_make_ready(active, fd, MASKOUT);

			if (revents & (POLLERR | POLLHUP))
				iv_fd_make_ready(active, fd, MASKERR);

			/*
			 * Retrieving an event dissociates the fd from
			 * the port, so queue it up for reassociation
			 * if it still has wanted bands.
			 */
			fd->registered_bands = 0;

			iv_list_del_init(&fd->list_notify);
			if (fd->wanted_bands) {
				iv_list_add_tail(&fd->list_notify,
						 &st->u.port.notify);
			}
		} else if (source == PORT_SOURCE_TIMER) {
			run_timers = 1;
		} else if (source == PORT_SOURCE_USER) {
			run_events = 1;
		} else {
			iv_fatal("iv_fd_port_poll: received event "
				 "from unknown source %d", source);
		}
	}

	/*
	 * A full event array may mean more events are pending, so
	 * poll again immediately with a zero timeout.
	 */
	if (nget == PORTEV_NUM) {
		run_timers = 1;

		rel = &_rel;
		rel->tv_sec = 0;
		rel->tv_nsec = 0;

		goto poll_more;
	}

	if (run_events)
		iv_event_run_pending_events();

	return run_timers;
}
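/*
 * Illustration (not part of the original source): the PORT_SOURCE_USER
 * branch above is what surfaces iv_event notifications, which
 * iv_event_run_pending_events() then dispatches.  A minimal sketch of
 * a cross-thread wakeup through ivykis's public <iv_event.h> API,
 * assuming iv_event_post() is thread-safe as documented:
 */
#include <iv.h>
#include <iv_event.h>
#include <pthread.h>
#include <stdio.h>

static struct iv_event ev;

static void got_event(void *cookie)
{
	printf("woken up by another thread\n");
	iv_event_unregister(&ev);
	iv_quit();
}

static void *poster(void *arg)
{
	iv_event_post(&ev);	/* safe to call from any thread */
	return NULL;
}

int main(void)
{
	pthread_t t;

	iv_init();

	IV_EVENT_INIT(&ev);
	ev.handler = got_event;
	iv_event_register(&ev);

	pthread_create(&t, NULL, poster, NULL);

	iv_main();

	pthread_join(t, NULL);
	iv_deinit();
	return 0;
}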