static void start_hostname_lookup(struct http_client_request *req) { // @@@ start lookup for req->redirect_hostname if (!inet_aton(req->redirect_hostname, &req->resolved_ip)) req->resolved_ip.s_addr = htonl(0xffffffff); req->hostname_lookup_callback_scheduled = 1; iv_task_register(&req->hostname_lookup); }
/* Finish the current test case: invoke its finish hook (when one is set),
 * then schedule the stop task to wind down the event loop. */
void
test_source_finish_tc(TestSource *self)
{
  if (self->current_test_case != NULL && self->current_test_case->finish != NULL)
    self->current_test_case->finish(self->current_test_case);

  iv_task_register(&self->stop);
}
/* Enable or disable this source's ivykis watches depending on whether the
 * log window has room for more messages.  The ordering of the locked
 * regions relative to the watch manipulation is what closes the races
 * described in the inline comments — do not reorder. */
static void
afinter_source_update_watches(AFInterSource *self)
{
  if (!log_source_free_to_send(&self->super))
    {
      /* ok, we go to sleep now. let's disable the post event by setting
       * current_internal_source to NULL. Messages get accumulated into
       * internal_msg_queue. */
      g_static_mutex_lock(&internal_msg_lock);
      current_internal_source = NULL;
      g_static_mutex_unlock(&internal_msg_lock);

      /* Possible race:
       *
       * After the check log_source_free_to_send() above, the destination
       * may actually write out a message, thus by the time we get here, the
       * window may have space again.  This is taken care of by the fact
       * that the wakeup is running in the main thread, which we do too.  So
       * the wakeup is either completely performed before we entered this
       * function, or after we return.
       *
       * In case it happened earlier, the check above will find that we have
       * window space, in case it's going to be happening afterwards, we
       * will be woken up by the schedule_wakeup event (which calls
       * update_watches again). */

      /* MARK events also get disabled */
      afinter_source_stop_watches(self);
    }
  else
    {
      /* ok we can send our stuff. make sure we wake up */
      afinter_source_stop_watches(self);
      self->mark_timer.expires = next_mark_target;
      afinter_source_start_watches(self);

      /* Possible race:
       *
       * Our current_internal_source pointer is set to NULL here (in case
       * we're just waking up).  In case the sender submits a message, it'll
       * not trigger the self->post (since the pointer is NULL).  This is
       * taken care of by the queue-length check in the locked region below.
       * If the queue has elements, we need to wake up, because we may have
       * lost a wakeup call.  If it happens after the locked region, that
       * doesn't matter, in that case we already pointed
       * current_internal_source to ourselves, thus the post event will also
       * be triggered.
       */
      g_static_mutex_lock(&internal_msg_lock);
      if (internal_msg_queue && g_queue_get_length(internal_msg_queue) > 0)
        iv_task_register(&self->restart_task);
      current_internal_source = self;
      g_static_mutex_unlock(&internal_msg_lock);
    }
}
/* Wake the destination's worker by scheduling the do_work task.  The
 * registration check makes this idempotent: calling it while the task is
 * already pending is a no-op. */
static void
log_threaded_dest_driver_wake_up(gpointer data)
{
  LogThrDestDriver *self = (LogThrDestDriver *) data;

  if (iv_task_registered(&self->do_work))
    return;

  iv_task_register(&self->do_work);
}
/* Schedule a journal re-check on the next main-loop iteration by
 * registering restart_task, bypassing the fd poll (whose watches are
 * suspended here).
 *
 * NOTE(review): this clears immediate_check even though the function name
 * suggests forcing a check — presumably restart_task's handler performs
 * the fetch unconditionally, making the flag redundant on this path;
 * confirm against the restart_task handler before changing. */
static void _force_check_in_next_poll(JournalReader *self) { self->immediate_check = FALSE; poll_events_suspend_watches(self->poll_events); self->suspended = FALSE; if (!iv_task_registered(&self->restart_task)) { iv_task_register(&self->restart_task); } }
/* Run every registered test case.  Each iteration publishes the current
 * case, schedules the start task, spins iv_main() until the case stops the
 * loop, then deinitializes the pipe before moving on. */
void
test_source_run_tests(TestSource *self)
{
  for (self->current_test = self->tests;
       self->current_test != NULL;
       self->current_test = self->current_test->next)
    {
      self->current_test_case = self->current_test->data;
      iv_task_register(&self->start);
      iv_main();
      log_pipe_deinit(&self->super);
    }
}
/* Install the fd handlers matching the condition mask requested by the log
 * protocol.  For a pollable fd the in/out/err handlers are computed first
 * and installed in one pass; for a non-pollable fd (always writable) the
 * immediate I/O task is scheduled instead.  Main thread only. */
static void
log_writer_update_fd_callbacks(LogWriter *self, GIOCondition cond)
{
  main_loop_assert_main_thread();
  if (self->pollable_state > 0)
    {
      void (*in_handler)(void *) = NULL;
      void (*out_handler)(void *) = NULL;

      if ((self->flags & LW_DETECT_EOF) && (cond & G_IO_IN) == 0 && (cond & G_IO_OUT))
        {
          /* output is enabled, we're in DETECT_EOF mode and the protocol
           * doesn't need input: watch for EOF so the connection can be
           * torn down when the peer closes. */
          in_handler = log_writer_io_check_eof;
        }
      else if (cond & G_IO_IN)
        {
          /* the protocol requested G_IO_IN, meaning it needs to invoke
           * read in the flush code, so route input to flush_output. */
          in_handler = log_writer_io_flush_output;
        }
      /* otherwise we're not interested in input: in_handler stays NULL */

      if (cond & G_IO_OUT)
        out_handler = log_writer_io_flush_output;

      iv_fd_set_handler_in(&self->fd_watch, in_handler);
      iv_fd_set_handler_out(&self->fd_watch, out_handler);
      iv_fd_set_handler_err(&self->fd_watch, log_writer_io_error);
    }
  else
    {
      /* fd is not pollable, assume it is always writable */
      if (cond & G_IO_OUT)
        {
          if (!iv_task_registered(&self->immed_io_task))
            iv_task_register(&self->immed_io_task);
        }
      else if (iv_task_registered(&self->immed_io_task))
        {
          iv_task_unregister(&self->immed_io_task);
        }
    }
}
/* Driver for an ivykis task/timer test: registers the task, primes a timer
 * with an already-expired deadline, runs the loop, and reports success via
 * the exit status. */
int main()
{
  /* fail hard instead of hanging: SIGALRM kills the test after 5s */
  alarm(5);

  iv_init();

  IV_TASK_INIT(&task);
  task.handler = handler_task;
  iv_task_register(&task);

  /* NOTE(review): the timer is only initialized here, never registered in
   * this function — presumably handler_task registers it; confirm. */
  IV_TIMER_INIT(&timer);
  timer.handler = handler_timer;
  iv_validate_now();
  timer.expires = iv_now;
  timer.expires.tv_sec--;

  iv_main();
  iv_deinit();

  return !success;
}
/*
 * This should replace the g_get_current_time and the g_source_get_current_time
 * calls in the main thread (log_msg_init, afinter_postpone_mark).
 *
 * Copies a cached wall-clock time into *result, refreshing the cache on
 * first use.  When an ivykis loop is running, the cache is invalidated via
 * a one-shot task at the end of the current iteration; without a loop the
 * cache is invalidated immediately so every call gets a fresh time.
 */
void
cached_g_current_time(GTimeVal *result)
{
  /* tv_sec == 0 marks an empty/invalidated cache */
  if (current_time_value.tv_sec == 0)
    {
      /* FIX: the argument had been mojibake-garbled to '¤t_time_value'
       * ('&curren' eaten by HTML-entity decoding); restore the address-of. */
      g_get_current_time(&current_time_value);
    }
  *result = current_time_value;

  if (iv_inited())
    {
      /* lazily set up the invalidation task on first use */
      if (invalidate_time_task.handler == NULL)
        {
          IV_TASK_INIT(&invalidate_time_task);
          invalidate_time_task.handler = (void (*)(void *)) invalidate_cached_time;
        }
      if (!iv_task_registered(&invalidate_time_task))
        iv_task_register(&invalidate_time_task);
    }
  else
    {
      /* no event loop (e.g. early startup): don't cache across calls */
      invalidate_cached_time();
    }
}
/* Verify that journal fd readiness triggers the poll handler: drain the
 * journal, then run an event loop with a task that adds mock entries, an
 * fd watch on the journal fd, and a one-second stop timer. */
void
__test_fd_handling(Journald *journald)
{
  struct iv_task entry_task;
  struct iv_fd journal_fd;
  struct iv_timer deadline;
  gint fd = journald_get_fd(journald);

  journald_process(journald);
  task_called = FALSE;
  poll_triggered = FALSE;

  IV_TASK_INIT(&entry_task);
  entry_task.cookie = journald;
  entry_task.handler = add_mock_entries;

  IV_FD_INIT(&journal_fd);
  journal_fd.fd = fd;
  journal_fd.cookie = journald;
  journal_fd.handler_in = handle_new_entry;

  /* stop the loop one second from now so the test cannot spin forever */
  iv_validate_now();
  IV_TIMER_INIT(&deadline);
  deadline.cookie = NULL;
  deadline.expires = iv_now;
  deadline.expires.tv_sec++;
  deadline.handler = stop_timer_expired;

  iv_task_register(&entry_task);
  iv_fd_register(&journal_fd);
  iv_timer_register(&deadline);

  iv_main();

  assert_true(poll_triggered, ASSERTION_ERROR("Poll event isn't triggered"));
}
/* Reconfigure the reader's ivykis watches (fd handlers, follow timer,
 * restart task) according to flow-control state and what the log protocol
 * asks for next.  Must run on the main thread. */
static void
log_reader_update_watches(LogReader *self)
{
  gint fd;
  GIOCondition cond;
  gboolean free_to_send;

  main_loop_assert_main_thread();

  self->suspended = FALSE;
  free_to_send = log_source_free_to_send(&self->super);
  if (!free_to_send ||
      self->immediate_check ||
      log_proto_prepare(self->proto, &fd, &cond))
    {
      /* we disable all I/O related callbacks here because we either know
       * that we can continue (e.g. immediate_check == TRUE) or we know
       * that we can't continue even if data would be available (e.g.
       * free_to_send == FALSE) */

      self->immediate_check = FALSE;
      if (iv_fd_registered(&self->fd_watch))
        {
          iv_fd_set_handler_in(&self->fd_watch, NULL);
          iv_fd_set_handler_out(&self->fd_watch, NULL);

          /* we disable the error handler too, as it might be
           * triggered even when we don't want to read data
           * (e.g. log_source_free_to_send() is FALSE).
           *
           * And at least on Linux, it may happen that EPOLLERR is
           * set, while there's still data in the socket buffer.  Thus
           * in reaction to an EPOLLERR, we could possibly send
           * further messages without validating the
           * log_source_free_to_send() would allow us to, potentially
           * overflowing our window (and causing a failed assertion in
           * log_source_queue(). */
          iv_fd_set_handler_err(&self->fd_watch, NULL);
        }

      if (iv_timer_registered(&self->follow_timer))
        iv_timer_unregister(&self->follow_timer);

      if (free_to_send)
        {
          /* we have data in our input buffer, we need to start working
           * on it immediately, without waiting for I/O events */
          if (!iv_task_registered(&self->restart_task))
            {
              iv_task_register(&self->restart_task);
            }
        }
      else
        {
          self->suspended = TRUE;
        }
      return;
    }

  if (iv_fd_registered(&self->fd_watch))
    {
      /* this branch is executed when our fd is connected to a non-file
       * source (e.g. TCP, UDP socket).  We set up I/O callbacks here.
       * files cannot be polled using epoll, as it causes an I/O error
       * (thus abort in ivykis). */
      if (cond & G_IO_IN)
        iv_fd_set_handler_in(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_in(&self->fd_watch, NULL);

      if (cond & G_IO_OUT)
        iv_fd_set_handler_out(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_out(&self->fd_watch, NULL);

      /* FIX: combine the flag bits with bitwise OR; the original used
       * arithmetic '+', which only yields the same mask because the two
       * bits happen to be disjoint. */
      if (cond & (G_IO_IN | G_IO_OUT))
        iv_fd_set_handler_err(&self->fd_watch, log_reader_io_process_input);
      else
        iv_fd_set_handler_err(&self->fd_watch, NULL);
    }
  else
    {
      if (self->options->follow_freq > 0)
        {
          /* re-arm the follow timer follow_freq milliseconds from now */
          if (iv_timer_registered(&self->follow_timer))
            iv_timer_unregister(&self->follow_timer);
          iv_validate_now();
          self->follow_timer.expires = iv_now;
          timespec_add_msec(&self->follow_timer.expires, self->options->follow_freq);
          iv_timer_register(&self->follow_timer);
        }
      else
        {
          /* NOTE: we don't need to unregister the timer here as follow_freq
           * never changes during runtime, thus if ever it was registered that
           * also means that we go into the if branch above. */
        }
    }
}
/* Schedule the initial do_work task for the destination.  Guarded with
 * iv_task_registered() for consistency with
 * log_threaded_dest_driver_wake_up(): ivykis aborts on registering a task
 * that is already registered, so the guard makes this safe to call even if
 * a wakeup has already queued the task. */
static void
log_threaded_dest_driver_start_watches(LogThrDestDriver *self)
{
  if (!iv_task_registered(&self->do_work))
    iv_task_register(&self->do_work);
}