int event_pending (struct event *ev, short events, struct timeval *tv)
{
  short revents = 0;

  dLOOPev;

  if (ev->ev_events & EV_SIGNAL)
    {
      /* sig */
      if (ev_is_active (&ev->iosig.sig) || ev_is_pending (&ev->iosig.sig))
        revents |= EV_SIGNAL;
    }
  else if (ev->ev_events & (EV_READ | EV_WRITE))
    {
      /* io */
      if (ev_is_active (&ev->iosig.io) || ev_is_pending (&ev->iosig.io))
        revents |= ev->ev_events & (EV_READ | EV_WRITE);
    }

  if (ev->ev_events & EV_TIMEOUT || ev_is_active (&ev->to) || ev_is_pending (&ev->to))
    {
      revents |= EV_TIMEOUT;

      if (tv)
        {
          ev_tstamp at = ev_now (EV_A);

          tv->tv_sec  = (long)at;
          tv->tv_usec = (long)((at - (ev_tstamp)tv->tv_sec) * 1e6);
        }
    }

  return events & revents;
}

int event_pending (struct event *ev, short events, struct timeval *tv)
{
  short revents = 0;

  dLOOPev;

  if (ev->ev_events & EV_SIGNAL)
    {
      /* sig */
      if (ev_is_active (&ev->iosig.sig) || ev_is_pending (&ev->iosig.sig))
        revents |= EV_SIGNAL;
    }
  else if (ev->ev_events & (EV_READ | EV_WRITE))
    {
      /* io */
      if (ev_is_active (&ev->iosig.io) || ev_is_pending (&ev->iosig.io))
        revents |= ev->ev_events & (EV_READ | EV_WRITE);
    }

  if (ev->ev_events & EV_TIMEOUT || ev_is_active (&ev->to) || ev_is_pending (&ev->to))
    {
      revents |= EV_TIMEOUT;

      if (tv)
        EV_TV_SET (tv, ev_now (EV_A)); /* not sure if this is right :) */
    }

  return events & revents;
}

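/* The EV_TV_SET macro in the later revision above replaces the
 * open-coded conversion in the earlier one. A minimal sketch of what
 * such a macro could expand to, assuming ev_tstamp is a floating-point
 * count of seconds; this is an illustration, not necessarily libev's
 * exact definition:
 */
#define EV_TV_SET(tv, t)                                   \
  do {                                                     \
    (tv)->tv_sec  = (long)(t);                             \
    (tv)->tv_usec = (long)(((t) - (tv)->tv_sec) * 1e6);    \
  } while (0)
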
int uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  uv_tcp_t* tcp;
  uv_async_t* async;
  uv_timer_t* timer;

  handle->close_cb = close_cb;

  switch (handle->type) {
    case UV_TCP:
      tcp = (uv_tcp_t*) handle;
      uv_read_stop((uv_stream_t*)tcp);
      ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
      break;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*) handle);
      break;

    case UV_CHECK:
      uv_check_stop((uv_check_t*) handle);
      break;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*) handle);
      break;

    case UV_ASYNC:
      async = (uv_async_t*)handle;
      ev_async_stop(EV_DEFAULT_ &async->async_watcher);
      ev_ref(EV_DEFAULT_UC);
      break;

    case UV_TIMER:
      timer = (uv_timer_t*)handle;
      if (ev_is_active(&timer->timer_watcher)) {
        ev_ref(EV_DEFAULT_UC);
      }
      ev_timer_stop(EV_DEFAULT_ &timer->timer_watcher);
      break;

    default:
      assert(0);
      return -1;
  }

  uv_flag_set(handle, UV_CLOSING);

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
  ev_feed_event(EV_DEFAULT_ &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));

  return 0;
}

F_NONNULL
static void mon_connect_cb(struct ev_loop* loop, struct ev_io* io, const int revents V_UNUSED) {
    dmn_assert(loop); dmn_assert(io);
    dmn_assert(revents == EV_WRITE);

    tcp_events_t* md = io->data;
    dmn_assert(md);
    dmn_assert(md->tcp_state == TCP_STATE_CONNECTING);
    dmn_assert(ev_is_active(md->connect_watcher));
    dmn_assert(ev_is_active(md->timeout_watcher) || ev_is_pending(md->timeout_watcher));
    dmn_assert(md->sock > -1);

    // nonblocking connect() just finished, need to check status
    bool success = false;
    int sock = md->sock;
    int so_error = 0;
    unsigned so_error_len = sizeof(so_error);
    (void)getsockopt(sock, SOL_SOCKET, SO_ERROR, &so_error, &so_error_len);
    if(unlikely(so_error)) {
        switch(so_error) {
            case EPIPE:
            case ECONNREFUSED:
            case ETIMEDOUT:
            case EHOSTUNREACH:
            case EHOSTDOWN:
            case ENETUNREACH:
                log_debug("plugin_tcp_connect: State poll of %s failed quickly: %s", md->desc, dmn_logf_strerror(so_error));
                break;
            default:
                log_err("plugin_tcp_connect: Failed to connect() monitoring socket to remote server, possible local problem: %s", dmn_logf_strerror(so_error));
        }
    }
    else {
        success = true;
    }

    shutdown(sock, SHUT_RDWR);
    close(sock);
    md->sock = -1;
    ev_io_stop(loop, md->connect_watcher);
    ev_timer_stop(loop, md->timeout_watcher);
    md->tcp_state = TCP_STATE_WAITING;
    gdnsd_mon_state_updater(md->idx, success);
}

int uv_close(uv_handle_t* handle) {
  switch (handle->type) {
    case UV_TCP:
      ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
      ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
      break;

    case UV_PREPARE:
      uv_prepare_stop(handle);
      break;

    case UV_CHECK:
      uv_check_stop(handle);
      break;

    case UV_IDLE:
      uv_idle_stop(handle);
      break;

    case UV_ASYNC:
      ev_async_stop(EV_DEFAULT_ &handle->async_watcher);
      ev_ref(EV_DEFAULT_UC);
      break;

    case UV_TIMER:
      if (ev_is_active(&handle->timer_watcher)) {
        ev_ref(EV_DEFAULT_UC);
      }
      ev_timer_stop(EV_DEFAULT_ &handle->timer_watcher);
      break;

    default:
      assert(0);
      return -1;
  }

  uv_flag_set(handle, UV_CLOSING);

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
  ev_feed_event(EV_DEFAULT_ &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));

  return 0;
}

void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  uv_async_t* async;
  uv_stream_t* stream;
  uv_process_t* process;

  handle->close_cb = close_cb;

  switch (handle->type) {
    case UV_NAMED_PIPE:
      uv_pipe_cleanup((uv_pipe_t*)handle);
      /* Fall through. */

    case UV_TTY:
    case UV_TCP:
      stream = (uv_stream_t*)handle;

      uv_read_stop(stream);
      ev_io_stop(stream->loop->ev, &stream->write_watcher);

      uv__close(stream->fd);
      stream->fd = -1;

      if (stream->accepted_fd >= 0) {
        uv__close(stream->accepted_fd);
        stream->accepted_fd = -1;
      }

      assert(!ev_is_active(&stream->read_watcher));
      assert(!ev_is_active(&stream->write_watcher));
      break;

    case UV_UDP:
      uv__udp_start_close((uv_udp_t*)handle);
      break;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*) handle);
      break;

    case UV_CHECK:
      uv_check_stop((uv_check_t*) handle);
      break;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*) handle);
      break;

    case UV_ASYNC:
      async = (uv_async_t*)handle;
      ev_async_stop(async->loop->ev, &async->async_watcher);
      ev_ref(async->loop->ev);
      break;

    case UV_TIMER:
      uv_timer_stop((uv_timer_t*)handle);
      break;

    case UV_PROCESS:
      process = (uv_process_t*)handle;
      ev_child_stop(process->loop->ev, &process->child_watcher);
      break;

    case UV_FS_EVENT:
      uv__fs_event_destroy((uv_fs_event_t*)handle);
      break;

    default:
      assert(0);
  }

  handle->flags |= UV_CLOSING;

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(handle->loop->ev, &handle->next_watcher);
  ev_feed_event(handle->loop->ev, &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));
}

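/* The ev_feed_event() + assert(ev_is_pending()) pattern that all the
 * uv_close() variants above share defers work to the next loop
 * iteration: feeding an event queues the watcher's callback without
 * waiting for a real readiness event, and ev_is_pending() observes the
 * queued state. A minimal self-contained sketch of just that pattern,
 * assuming plain libev 4.x; the watcher and callback names here are
 * invented for illustration:
 */
#include <assert.h>
#include <ev.h>

static void next_cb(struct ev_loop *loop, ev_idle *w, int revents) {
    /* By the time the callback runs, the event is no longer pending. */
    ev_idle_stop(loop, w);
    ev_break(loop, EVBREAK_ALL);
}

int main(void) {
    struct ev_loop *loop = EV_DEFAULT;
    ev_idle next_watcher;

    ev_idle_init(&next_watcher, next_cb);
    ev_idle_start(loop, &next_watcher);
    ev_feed_event(loop, &next_watcher, EV_IDLE);

    assert(ev_is_pending(&next_watcher)); /* queued, not yet delivered */

    ev_run(loop, 0); /* delivers the pending event, invoking next_cb */
    return 0;
}
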
int uv_pipe_connect(uv_connect_t* req,
                    uv_pipe_t* handle,
                    const char* name,
                    uv_connect_cb cb) {
  struct sockaddr_un sun;
  int saved_errno;
  int sockfd;
  int status;
  int r;

  saved_errno = errno;
  sockfd = -1;
  status = -1;

  if ((sockfd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
    uv_err_new(handle->loop, errno);
    goto out;
  }

  memset(&sun, 0, sizeof sun);
  uv__strlcpy(sun.sun_path, name, sizeof(sun.sun_path));
  sun.sun_family = AF_UNIX;

  /* We don't check for EINPROGRESS. Think about it: the socket
   * is either there or not.
   */
  do {
    r = connect(sockfd, (struct sockaddr*)&sun, sizeof sun);
  }
  while (r == -1 && errno == EINTR);

  if (r == -1) {
    uv_err_new(handle->loop, errno);
    uv__close(sockfd);
    goto out;
  }

  uv__stream_open((uv_stream_t*)handle, sockfd, UV_READABLE | UV_WRITABLE);

  ev_io_start(handle->loop->ev, &handle->read_watcher);
  ev_io_start(handle->loop->ev, &handle->write_watcher);

  status = 0;

out:
  handle->delayed_error = status; /* Passed to callback. */
  handle->connect_req = req;
  req->handle = (uv_stream_t*)handle;
  req->type = UV_CONNECT;
  req->cb = cb;
  ngx_queue_init(&req->queue);

  /* Run callback on next tick. */
  ev_feed_event(handle->loop->ev, &handle->read_watcher, EV_CUSTOM);
  assert(ev_is_pending(&handle->read_watcher));

  /* Mimic the Windows pipe implementation, always
   * return 0 and let the callback handle errors.
   */
  errno = saved_errno;
  return 0;
}

F_NONNULL
static void mon_interval_cb(struct ev_loop* loop, struct ev_timer* t, const int revents V_UNUSED) {
    dmn_assert(loop); dmn_assert(t);
    dmn_assert(revents == EV_TIMER);

    tcp_events_t* md = t->data;
    dmn_assert(md);

    if(md->tcp_state != TCP_STATE_WAITING) {
        log_warn("plugin_tcp_connect: A monitoring request attempt seems to have "
            "lasted longer than the monitoring interval. "
            "Skipping this round of monitoring - are you "
            "starved for CPU time?");
        return;
    }

    dmn_assert(md->sock == -1);
    dmn_assert(!ev_is_active(md->connect_watcher));
    dmn_assert(!ev_is_active(md->timeout_watcher) && !ev_is_pending(md->timeout_watcher));

    log_debug("plugin_tcp_connect: Starting state poll of %s", md->desc);

    const bool isv6 = md->addr.sa.sa_family == AF_INET6;

    const int sock = socket(isv6 ? PF_INET6 : PF_INET, SOCK_STREAM, gdnsd_getproto_tcp());
    if(sock == -1) {
        log_err("plugin_tcp_connect: Failed to create monitoring socket: %s", dmn_logf_errno());
        return;
    }

    if(fcntl(sock, F_SETFL, (fcntl(sock, F_GETFL, 0)) | O_NONBLOCK) == -1) {
        log_err("plugin_tcp_connect: Failed to set O_NONBLOCK on monitoring socket: %s", dmn_logf_errno());
        close(sock);
        return;
    }

    bool success = false;
    if(likely(connect(sock, &md->addr.sa, md->addr.len) == -1)) {
        switch(errno) {
            case EINPROGRESS:
                // this is the normal case, where nonblock connect
                //   wants us to wait for writability...
                md->sock = sock;
                md->tcp_state = TCP_STATE_CONNECTING;
                ev_io_set(md->connect_watcher, sock, EV_WRITE);
                ev_io_start(loop, md->connect_watcher);
                ev_timer_set(md->timeout_watcher, md->tcp_svc->timeout, 0);
                ev_timer_start(loop, md->timeout_watcher);
                return; // don't do socket/status finishing actions below...
                break; // redundant
            case EPIPE:
            case ECONNREFUSED:
            case ETIMEDOUT:
            case EHOSTUNREACH:
            case EHOSTDOWN:
            case ENETUNREACH:
                // fast remote failures, e.g. when remote is local, I hope
                log_debug("plugin_tcp_connect: State poll of %s failed very quickly", md->desc);
                break;
            default:
                log_err("plugin_tcp_connect: Failed to connect() monitoring socket to remote server, possible local problem: %s", dmn_logf_errno());
        }
    }
    else {
        success = true;
    }

    close(sock);
    gdnsd_mon_state_updater(md->idx, success);
}

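/* The interval/connect callback pair above implements the standard
 * nonblocking-connect protocol: issue connect(), and on EINPROGRESS
 * wait for EV_WRITE, then read SO_ERROR to learn the real outcome. A
 * condensed, runnable sketch of just that protocol, assuming plain
 * libev; the 127.0.0.1:9 target and all names are invented for
 * illustration, and error handling is trimmed:
 */
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <ev.h>

static void connect_cb(struct ev_loop *loop, ev_io *w, int revents) {
    /* Writability means the connect attempt finished; SO_ERROR says how. */
    int so_error = 0;
    socklen_t len = sizeof(so_error);
    getsockopt(w->fd, SOL_SOCKET, SO_ERROR, &so_error, &len);
    printf("connect %s\n", so_error ? "failed" : "succeeded");
    ev_io_stop(loop, w);
    close(w->fd);
    ev_break(loop, EVBREAK_ALL);
}

int main(void) {
    struct ev_loop *loop = EV_DEFAULT;
    struct sockaddr_in addr = { .sin_family = AF_INET,
                                .sin_port = htons(9) };
    inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

    int sock = socket(AF_INET, SOCK_STREAM, 0);
    fcntl(sock, F_SETFL, fcntl(sock, F_GETFL, 0) | O_NONBLOCK);

    ev_io connect_watcher;
    if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) == -1
        && errno == EINPROGRESS) {
        ev_io_init(&connect_watcher, connect_cb, sock, EV_WRITE);
        ev_io_start(loop, &connect_watcher);
        ev_run(loop, 0);
    }
    return 0;
}
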
/**
 * Test if the watcher is pending.
 *
 * Usage:
 *     bool = watcher:is_pending()
 *
 * [+1, -0, e]
 */
static int watcher_is_pending(lua_State *L) {
    lua_pushboolean(L, ev_is_pending(check_watcher(L, 1)));
    return 1;
}

static PyObject* IO_is_pending(libevwrapper_IO *self, PyObject *args) {
    struct ev_io *io = &(self->io);
    return PyBool_FromLong(ev_is_pending(io));
}

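/* The two bindings above (lua-ev and a CPython wrapper) surface the
 * same libev predicate. The distinction worth keeping in mind:
 * ev_is_active() is true from ev_TYPE_start() until ev_TYPE_stop(),
 * while ev_is_pending() is true only in the window between an event
 * being queued for the watcher and its callback being invoked. A small
 * sketch, assuming plain libev; the timer period and names are
 * invented for illustration:
 */
#include <stdio.h>
#include <ev.h>

static void timeout_cb(struct ev_loop *loop, ev_timer *w, int revents) {
    /* The pending flag has already been consumed by the time we run. */
    printf("in callback: active=%d pending=%d\n",
           ev_is_active(w), ev_is_pending(w));
    ev_break(loop, EVBREAK_ALL);
}

int main(void) {
    struct ev_loop *loop = EV_DEFAULT;
    ev_timer timer;

    ev_timer_init(&timer, timeout_cb, 0.01, 0.);
    ev_timer_start(loop, &timer);

    printf("after start: active=%d pending=%d\n",
           ev_is_active(&timer), ev_is_pending(&timer));

    ev_run(loop, 0);
    return 0;
}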