/*
 * Construct a FIFO log queue.
 *
 * qoverflow_size -- maximum number of queued items before overflow handling
 *                   kicks in.
 * persist_name   -- name used by log_queue_init_instance() to identify this
 *                   queue instance.
 *
 * Returns the embedded LogQueue base pointer; ownership passes to the
 * caller, who releases it through super.free_fn.
 */
LogQueue *
log_queue_fifo_new(gint qoverflow_size, const gchar *persist_name)
{
  LogQueueFifo *self;
  gint thread_index;

  /* One trailing per-thread input list is allocated inline after the
   * struct (flexible-array style), sized by log_queue_max_threads. */
  self = g_malloc0(sizeof(LogQueueFifo) +
                   log_queue_max_threads * sizeof(self->qoverflow_input[0]));

  log_queue_init_instance(&self->super, persist_name);

  /* Fill in the LogQueue virtual method table. */
  self->super.get_length = log_queue_fifo_get_length;
  self->super.keep_on_reload = log_queue_fifo_keep_on_reload;
  self->super.push_tail = log_queue_fifo_push_tail;
  self->super.push_head = log_queue_fifo_push_head;
  self->super.pop_head = log_queue_fifo_pop_head;
  self->super.ack_backlog = log_queue_fifo_ack_backlog;
  self->super.rewind_backlog = log_queue_fifo_rewind_backlog;
  self->super.free_fn = log_queue_fifo_free;

  /* Per-thread input queues: each gets an empty list and a finish
   * callback that drains it into the shared queue. */
  for (thread_index = 0; thread_index < log_queue_max_threads; thread_index++)
    {
      INIT_IV_LIST_HEAD(&self->qoverflow_input[thread_index].items);
      main_loop_io_worker_finish_callback_init(&self->qoverflow_input[thread_index].cb);
      self->qoverflow_input[thread_index].cb.user_data = self;
      self->qoverflow_input[thread_index].cb.func = log_queue_fifo_move_input;
    }

  /* Shared lists start empty. */
  INIT_IV_LIST_HEAD(&self->qoverflow_wait);
  INIT_IV_LIST_HEAD(&self->qoverflow_output);
  INIT_IV_LIST_HEAD(&self->qbacklog);

  self->qoverflow_size = qoverflow_size;
  return &self->super;
}
static void iv_work_thread(void *_thr) { struct work_pool_thread *thr = _thr; struct work_pool_priv *pool = thr->pool; iv_init(); INIT_IV_LIST_HEAD(&thr->list); thr->kicked = 0; IV_EVENT_INIT(&thr->kick); thr->kick.cookie = thr; thr->kick.handler = iv_work_thread_got_event; iv_event_register(&thr->kick); IV_TIMER_INIT(&thr->idle_timer); thr->idle_timer.cookie = thr; thr->idle_timer.handler = iv_work_thread_idle_timeout; if (pool->thread_start != NULL) pool->thread_start(pool->cookie); iv_event_post(&thr->kick); iv_main(); iv_deinit(); }
/*
 * Poll for file descriptor readiness and dispatch handlers.
 *
 * st -- per-thread ivykis state
 * to -- poll timeout passed through to the backend's poll method
 *
 * The backend fills `active` with the fds that became ready; each is then
 * removed from the list and its err/in/out handlers are invoked in that
 * fixed order for whichever bands are set in ready_bands.
 */
void iv_fd_poll_and_run(struct iv_state *st, struct timespec *to)
{
	struct iv_list_head active;

	/* Force the cached "now" timestamp to be refreshed after
	 * (potentially) sleeping in poll. */
	__iv_invalidate_now(st);

	INIT_IV_LIST_HEAD(&active);
	method->poll(st, &active, to);

	while (!iv_list_empty(&active)) {
		struct iv_fd_ *fd;

		fd = iv_list_entry(active.next, struct iv_fd_, list_active);
		iv_list_del_init(&fd->list_active);

		/* Record which fd we are dispatching.  After each handler
		 * we re-check st->handled_fd: presumably it is cleared
		 * when a handler unregisters this fd, so the remaining
		 * handlers are skipped rather than run on a dead fd --
		 * NOTE(review): confirm against iv_fd_unregister. */
		st->handled_fd = fd;

		/* Error band first; no handled_fd check needed before the
		 * first handler runs. */
		if (fd->ready_bands & MASKERR)
			if (fd->handler_err != NULL)
				fd->handler_err(fd->cookie);

		if (st->handled_fd != NULL && fd->ready_bands & MASKIN)
			if (fd->handler_in != NULL)
				fd->handler_in(fd->cookie);

		if (st->handled_fd != NULL && fd->ready_bands & MASKOUT)
			if (fd->handler_out != NULL)
				fd->handler_out(fd->cookie);
	}
}
/*
 * Initialize the kqueue poll backend for this thread's state.
 *
 * Creates the kqueue descriptor, marks it close-on-exec, and resets the
 * pending-notification list.  Returns 0 on success, -1 if kqueue()
 * failed.
 */
static int iv_fd_kqueue_init(struct iv_state *st)
{
	int fd;

	fd = kqueue();
	if (fd < 0)
		return -1;

	/* Don't leak the kqueue fd into exec'd children. */
	iv_fd_set_cloexec(fd);

	st->u.kqueue.kqueue_fd = fd;
	INIT_IV_LIST_HEAD(&st->u.kqueue.notify);

	return 0;
}
static void iv_fd_register_prologue(struct iv_state *st, struct iv_fd_ *fd) { if (fd->registered) { iv_fatal("iv_fd_register: called with fd which is " "still registered"); } if (fd->fd < 0 || fd->fd >= maxfd) { iv_fatal("iv_fd_register: called with invalid fd %d " "(maxfd=%d)", fd->fd, maxfd); } fd->registered = 1; INIT_IV_LIST_HEAD(&fd->list_active); fd->ready_bands = 0; fd->registered_bands = 0; #if defined(HAVE_SYS_DEVPOLL_H) || defined(HAVE_EPOLL_CREATE) || \ defined(HAVE_KQUEUE) || defined(HAVE_PORT_CREATE) INIT_IV_LIST_HEAD(&fd->list_notify); #endif if (method->register_fd != NULL) method->register_fd(st, fd); }
/*
 * Initialize the Solaris event-port poll backend for this thread's state.
 *
 * Creates the port, marks it close-on-exec, empties the notify list, and
 * marks the timer as not yet associated (timer_id == -1).  Returns 0 on
 * success, -1 if port_create() failed.
 */
static int iv_fd_port_init(struct iv_state *st)
{
	int port_fd;

	port_fd = port_create();
	if (port_fd < 0)
		return -1;

	/* Don't leak the port into exec'd children. */
	iv_fd_set_cloexec(port_fd);

	st->u.port.port_fd = port_fd;
	st->u.port.timer_id = -1;
	INIT_IV_LIST_HEAD(&st->u.port.notify);

	return 0;
}
/*
 * Public initializer for an iv_task.
 *
 * Resets the task's internal list linkage so it starts out unqueued.
 */
void IV_TASK_INIT(struct iv_task *_t)
{
	struct iv_task_ *task = (struct iv_task_ *)_t;

	INIT_IV_LIST_HEAD(&task->list);
}
/*
 * Per-thread task subsystem setup: start with an empty pending-task list.
 */
void iv_task_init(struct iv_state *st)
{
	INIT_IV_LIST_HEAD(&st->tasks);
}