Example #1
void *MainService(void * arg)
{
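	/* One-time service init: load the configuration and mark the
	 * service ready; a failed init() is fatal. */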
	
	status=INITIALIZE;
	if(init("config","orderid")){
		puts("Initialization error.");
		exit(1);
	}
	else
		status=READY;
	update_display("default.png",0);

	
	libusb_init(NULL);
	handle=libusb_open_device_with_vid_pid(NULL,USB_VENDORID,USB_PRODUCTID);
	if(!handle)
	{	
		printf("OPEN DEVICE ERROR.\n");
		getchar();
		exit(1);
	}
	libusb_claim_interface(handle,0);
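	/* Event-loop plumbing: remember the main thread and install a
	 * SIGUSR1 watcher so the USB reader thread started below can
	 * wake the event loop. */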
	pthread_t readusb_thread;
	main_thread=pthread_self();
	struct ev_loop *loop = EV_DEFAULT;
	ev_init (&net_watcher, net_cb);
	ev_signal_init(&usb_watcher,usb_cb,SIGUSR1);
	ev_signal_start(loop,&usb_watcher);
	if(pthread_create(&readusb_thread,NULL,read_from_usb,NULL))
	{
		printf("CREATE THREAD ERROR.\n");
		getchar();
		exit(1);
	}
	
	/* Re-arm the network watcher whenever it is inactive, run the
	 * event loop, and stop the watcher again after a pass in which
	 * network I/O occurred. */
	while(1)
	{
		if(!ev_is_active(&net_watcher))
		{
			ev_io_set(&net_watcher, fd_net, EV_READ);
			ev_io_start(loop, &net_watcher);
		}
		ev_run(loop, 0);
		if(net_io)
			ev_io_stop(loop, &net_watcher);
	}
	munmap((void *)p_order_id,sizeof(unsigned int));
	ghttp_close(request);
	ghttp_request_destroy(request);
	return 0;
}
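Note: ev_is_active() takes only the watcher (there is no loop argument) and reports whether the watcher has been started and not yet stopped; the main loop above uses it to avoid re-arming a watcher that is already registered. As written, the munmap()/ghttp cleanup after the infinite while(1) is unreachable.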
Example #2
File: ebb.c Project: bakins/libebb
static void
on_handshake(struct ev_loop *loop, ev_io *watcher, int revents)
{
  ebb_connection *connection = watcher->data;

  //printf("on_handshake\n");

  assert(ev_is_active(&connection->timeout_watcher));
  assert(!ev_is_active(&connection->read_watcher));
  assert(!ev_is_active(&connection->write_watcher));

  if(EV_ERROR & revents) {
    error("on_handshake() got error event, closing connection.n");
    goto error;
  }

  int r = gnutls_handshake(connection->session);
  if(r < 0) {
    if(gnutls_error_is_fatal(r)) goto error;
    if(r == GNUTLS_E_INTERRUPTED || r == GNUTLS_E_AGAIN)
      ev_io_set( watcher
               , connection->fd
               , (GNUTLS_NEED_WRITE ? EV_WRITE : EV_READ)
               );
    return;
  }

  ebb_connection_reset_timeout(connection);
  ev_io_stop(loop, watcher);

  ev_io_start(loop, &connection->read_watcher);
  if(CONNECTION_HAS_SOMETHING_TO_WRITE)
    ev_io_start(loop, &connection->write_watcher);

  return;
error:
  close_connection(connection);
}
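Note: GNUTLS_NEED_WRITE is a macro local to libebb (presumably wrapping gnutls_record_get_direction()) that flips the handshake watcher between EV_READ and EV_WRITE until the TLS handshake completes. Strictly, libev wants a watcher stopped before ev_io_set() modifies it; the asserts at the top record which of the connection's other watchers are expected to be active when this callback runs.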
Example #3
/*
 * Callback to setup DNS timeout callback
 */
static void
dns_timer_setup_cb(struct dns_ctx *ctx, int timeout, void *data)
{
    struct ev_loop *loop = (struct ev_loop *)data;

    if (ev_is_active(&resolv_timeout_watcher)) {
        ev_timer_stop(loop, &resolv_timeout_watcher);
    }

    if (ctx != NULL && timeout >= 0) {
        ev_timer_set(&resolv_timeout_watcher, timeout, 0.0);
        ev_timer_start(loop, &resolv_timeout_watcher);
    }
}
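Note: the stop-before-modify rule applies to timers as well: ev_timer_set() must not be called on an active watcher, so this callback stops resolv_timeout_watcher before re-arming it with whatever timeout the resolver library requested.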
Example #4
static void uv__read(uv_stream_t* stream) {
  uv_buf_t buf;
  ssize_t nread;
  struct ev_loop* ev = stream->loop->ev;

  /* XXX: Maybe instead of having UV_READING we just test if
   * tcp->read_cb is NULL or not?
   */
  while (stream->read_cb && ((uv_handle_t*)stream)->flags & UV_READING) {
    assert(stream->alloc_cb);
    buf = stream->alloc_cb((uv_handle_t*)stream, 64 * 1024);

    assert(buf.len > 0);
    assert(buf.base);
    assert(stream->fd >= 0);

    do {
      nread = read(stream->fd, buf.base, buf.len);
    }
    while (nread < 0 && errno == EINTR);

    if (nread < 0) {
      /* Error */
      if (errno == EAGAIN) {
        /* Wait for the next one. */
        if (stream->flags & UV_READING) {
          ev_io_start(ev, &stream->read_watcher);
        }
        uv_err_new(stream->loop, EAGAIN);
        stream->read_cb(stream, 0, buf);
        return;
      } else {
        /* Error. User should call uv_close(). */
        uv_err_new(stream->loop, errno);
        stream->read_cb(stream, -1, buf);
        assert(!ev_is_active(&stream->read_watcher));
        return;
      }
    } else if (nread == 0) {
      /* EOF */
      uv_err_new_artificial(stream->loop, UV_EOF);
      ev_io_stop(ev, &stream->read_watcher);
      stream->read_cb(stream, -1, buf);
      return;
    } else {
      /* Successful read */
      stream->read_cb(stream, nread, buf);
    }
  }
}
Example #5
F_NONNULL
static void mon_timeout_cb(struct ev_loop* loop, struct ev_timer* t, const int revents V_UNUSED) {
    dmn_assert(loop); dmn_assert(t);
    dmn_assert(revents == EV_TIMER);

    http_events_t* md = (http_events_t*)t->data;

    dmn_assert(md);
    dmn_assert(md->sock != -1);
    dmn_assert(
        (md->hstate == HTTP_STATE_READING && ev_is_active(md->read_watcher))
     || (md->hstate == HTTP_STATE_WRITING && ev_is_active(md->write_watcher))
    );

    log_debug("plugin_http_status: State poll of %s timed out", md->smgr->desc);
    if(md->hstate == HTTP_STATE_READING) ev_io_stop(loop, md->read_watcher);
    else if(md->hstate == HTTP_STATE_WRITING) ev_io_stop(loop, md->write_watcher);
    shutdown(md->sock, SHUT_RDWR);
    close(md->sock);
    md->sock = -1;
    md->hstate = HTTP_STATE_WAITING;
    gdnsd_mon_state_updater(md->smgr, false);
}
Example #6
/* Callback from ares when socket operation is started */
static void uv__ares_sockstate_cb(void* data, ares_socket_t sock,
    int read, int write) {
  uv_ares_task_t* h = uv_find_ares_handle(sock);

  if (read || write) {
    if (!h) {
      /* New socket */

      /* If this is the first socket then start the timer. */
      if (!ev_is_active(&ares_data.timer)) {
        assert(uv_ares_handles_empty());
        ev_timer_again(EV_DEFAULT_UC_ &ares_data.timer);
      }

      h = uv__ares_task_create(sock);
      uv_add_ares_handle(h);
    }

    if (read) {
      ev_io_start(EV_DEFAULT_UC_ &h->read_watcher);
    } else {
      ev_io_stop(EV_DEFAULT_UC_ &h->read_watcher);
    }

    if (write) {
      ev_io_start(EV_DEFAULT_UC_ &h->write_watcher);
    } else {
      ev_io_stop(EV_DEFAULT_UC_ &h->write_watcher);
    }

  } else {
    /*
     * read == 0 and write == 0 this is c-ares's way of notifying us that
     * the socket is now closed. We must free the data associated with
     * socket.
     */
    assert(h && "When an ares socket is closed we should have a handle for it");

    ev_io_stop(EV_DEFAULT_UC_ &h->read_watcher);
    ev_io_stop(EV_DEFAULT_UC_ &h->write_watcher);

    uv_remove_ares_handle(h);
    free(h);

    if (uv_ares_handles_empty()) {
      ev_timer_stop(EV_DEFAULT_UC_ &ares_data.timer);
    }
  }
}
Example #7
static void session_set_socket(struct session *session, struct session_socket *info, curl_socket_t s, int action)
{
	g_debug("%s", __PRETTY_FUNCTION__);

	int kind = (action & CURL_POLL_IN  ? EV_READ  : 0)
	         | (action & CURL_POLL_OUT ? EV_WRITE : 0);

	info->sockfd = s;
	info->action = action;
	info->session = session;
	if( ev_is_active(&info->io) )
		ev_io_stop(g_dionaea->loop, &info->io);
	ev_io_init(&info->io, event_cb, info->sockfd, kind);
	if( kind != 0 )
		ev_io_start(g_dionaea->loop, &info->io);
}
Example #8
static void
start_watcher(struct ev_loop *loop,
              ev_io *watcher,
              int events)
{
    /* We cannot modify the watcher while it is active. Yank it back.  */
    if (ev_is_active(watcher))
    {
        ev_io_stop(loop, watcher);
    }

    /* Update the flags and start the watcher.  */
    ev_io_set(watcher, watcher->fd, events);
    ev_io_start(loop, watcher);
}
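The stop/set/start sequence above is the canonical way to retarget an active ev_io watcher. Below is a minimal, self-contained sketch of the same rules, not taken from any of the projects above; it assumes libev 4 is installed (compile with -lev) and watches fd 0 purely for illustration.

#include <stdio.h>
#include <ev.h>

static ev_io stdin_watcher;

static void stdin_cb(struct ev_loop *loop, ev_io *w, int revents)
{
    (void)revents;
    printf("stdin is readable\n");
    ev_io_stop(loop, w);            /* the watcher is inactive from here on */
    ev_break(loop, EVBREAK_ALL);
}

int main(void)
{
    struct ev_loop *loop = EV_DEFAULT;

    ev_io_init(&stdin_watcher, stdin_cb, /* fd = */ 0, EV_READ);
    ev_io_start(loop, &stdin_watcher);

    /* Active between ev_io_start() and ev_io_stop(). To change the fd
     * or event mask now, we would have to stop the watcher, call
     * ev_io_set(), and start it again, as start_watcher() does above. */
    if (ev_is_active(&stdin_watcher))
        printf("watcher is active\n");

    ev_run(loop, 0);
    return 0;
}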
Example #9
void uv__read(uv_handle_t* handle) {
    /* XXX: Maybe instead of having UV_READING we just test if
     * handle->read_cb is NULL or not?
     */
    while (handle->read_cb && uv_flag_is_set(handle, UV_READING)) {
        assert(alloc_cb);
        uv_buf_t buf = alloc_cb(handle, 64 * 1024);

        assert(buf.len > 0);
        assert(buf.base);

        struct iovec* iov = (struct iovec*) &buf;

        ssize_t nread = readv(handle->fd, iov, 1);

        if (nread < 0) {
            /* Error */
            if (errno == EAGAIN) {
                /* Wait for the next one. */
                if (uv_flag_is_set(handle, UV_READING)) {
                    ev_io_start(EV_DEFAULT_UC_ &handle->read_watcher);
                }
                uv_err_new(handle, EAGAIN);
                handle->read_cb(handle, 0, buf);
                return;
            } else {
                uv_err_new(handle, errno);
                uv_close(handle);
                handle->read_cb(handle, -1, buf);
                assert(!ev_is_active(&handle->read_watcher));
                return;
            }
        } else if (nread == 0) {
            /* EOF */
            uv_err_new_artificial(handle, UV_EOF);
            ev_io_stop(EV_DEFAULT_UC_ &handle->read_watcher);
            handle->read_cb(handle, -1, buf);

            if (uv_flag_is_set(handle, UV_SHUT)) {
                uv_close(handle);
            }
            return;
        } else {
            /* Successful read */
            handle->read_cb(handle, nread, buf);
        }
    }
}
Example #10
void uv__read(uv_tcp_t* tcp) {
  uv_buf_t buf;
  struct iovec* iov;
  ssize_t nread;

  /* XXX: Maybe instead of having UV_READING we just test if
   * tcp->read_cb is NULL or not?
   */
  while (tcp->read_cb && uv_flag_is_set((uv_handle_t*)tcp, UV_READING)) {
    assert(tcp->alloc_cb);
    buf = tcp->alloc_cb((uv_stream_t*)tcp, 64 * 1024);

    assert(buf.len > 0);
    assert(buf.base);

    iov = (struct iovec*) &buf;

    nread = read(tcp->fd, buf.base, buf.len);

    if (nread < 0) {
      /* Error */
      if (errno == EAGAIN) {
        /* Wait for the next one. */
        if (uv_flag_is_set((uv_handle_t*)tcp, UV_READING)) {
          ev_io_start(EV_DEFAULT_UC_ &tcp->read_watcher);
        }
        uv_err_new((uv_handle_t*)tcp, EAGAIN);
        tcp->read_cb((uv_stream_t*)tcp, 0, buf);
        return;
      } else {
        /* Error. User should call uv_close(). */
        uv_err_new((uv_handle_t*)tcp, errno);
        tcp->read_cb((uv_stream_t*)tcp, -1, buf);
        assert(!ev_is_active(&tcp->read_watcher));
        return;
      }
    } else if (nread == 0) {
      /* EOF */
      uv_err_new_artificial((uv_handle_t*)tcp, UV_EOF);
      ev_io_stop(EV_DEFAULT_UC_ &tcp->read_watcher);
      tcp->read_cb((uv_stream_t*)tcp, -1, buf);
      return;
    } else {
      /* Successful read */
      tcp->read_cb((uv_stream_t*)tcp, nread, buf);
    }
  }
}
Example #11
static void event_timer_cb(struct ev_loop *loop, ev_timer *w, int revents) {
	liEventTimer *timer = LI_CONTAINER_OF(w, liEventTimer, libevmess.timer);
	liEventLoop *my_loop = timer->base.link_watchers.data;
	UNUSED(revents);

	LI_FORCE_ASSERT(NULL != my_loop);
	LI_FORCE_ASSERT(loop == my_loop->loop);

	if (ev_is_active(w)) {
		if (!timer->base.keep_loop_alive) ev_ref(loop);
		ev_timer_stop(loop, w);
	}
	timer->base.active = 0;

	timer->base.callback(&timer->base, LI_EV_WAKEUP);
}
Example #12
static void event_child_cb(struct ev_loop *loop, ev_child *w, int revents) {
	liEventChild *child = LI_CONTAINER_OF(w, liEventChild, libevmess.child);
	liEventLoop *my_loop = child->base.link_watchers.data;
	UNUSED(revents);

	LI_FORCE_ASSERT(NULL != my_loop);
	LI_FORCE_ASSERT(loop == my_loop->loop);

	if (ev_is_active(w)) {
		if (!child->base.keep_loop_alive) ev_ref(loop);
		ev_child_stop(loop, w);
	}
	child->base.active = 0;

	child->base.callback(&child->base, LI_EV_WAKEUP);
}
Example #13
int event_del (struct event *ev)
{
  dLOOPev;

  if (ev->ev_events & EV_SIGNAL)
    ev_signal_stop (EV_A_ &ev->iosig.sig);
  else if (ev->ev_events & (EV_READ | EV_WRITE))
    ev_io_stop (EV_A_ &ev->iosig.io);

  if (ev_is_active (&ev->to))
    ev_timer_stop (EV_A_ &ev->to);

  ev->ev_flags = EVLIST_INIT;

  return 0;
}
Example #14
/**
 * Implements the callback function on all the watcher objects.  This
 * will be indirectly called by the libev event loop implementation.
 *
 * TODO: Custom error handlers?  Currently, any error in a callback
 * will print the error to stderr and things will "go on".
 *
 * [+0, -0, m]
 */
static void watcher_cb(struct ev_loop *loop, void *watcher, int revents) {
    lua_State* L       = ev_userdata(loop);
    void*      objs[3] = { loop, watcher, NULL };
    int        result;

    lua_pushcfunction(L, traceback);

    result = lua_checkstack(L, 5);
    assert(result != 0 /* able to allocate enough space on lua stack */);
    result = push_objs(L, objs);
    assert(result == 2 /* pushed two objects on the lua stack */);
    assert(!lua_isnil(L, -2) /* the loop obj was resolved */);
    assert(!lua_isnil(L, -1) /* the watcher obj was resolved */);

    /* STACK: <traceback>, <loop>, <watcher> */

    if ( !ev_is_active(watcher) ) {
        /* Must remove "stop"ed watcher from loop: */
        loop_stop_watcher(L, -2, -1);
    }

    lua_getfenv(L, -1);
    assert(lua_istable(L, -1) /* The watcher fenv was found */);
    lua_rawgeti(L, -1, WATCHER_FN);
    if ( lua_isnil(L, -1) ) {
        /* The watcher function was set to nil, so do nothing */
        lua_pop(L, 5);
        return;
    }
    assert(lua_isfunction(L, -1) /* watcher function is a function */);

    /* STACK: <traceback>, <loop>, <watcher>, <watcher fenv>, <watcher fn> */

    lua_insert(L, -4);
    lua_pop(L, 1);
    lua_pushinteger(L, revents);

    /* STACK: <traceback>, <watcher fn>, <loop>, <watcher>, <revents> */
    if ( lua_pcall(L, 3, 0, -5) ) {
        /* TODO: Enable user-specified error handler! */
        fprintf(stderr, "CALLBACK FAILED: %s\n",
                lua_tostring(L, -1));
        lua_pop(L, 2);
    } else {
        lua_pop(L, 1);
    }
}
Example #15
/*
 * Handle c-ares events
 */
static void
resolv_sock_state_cb(void *data, int s, int read, int write)
{
    struct resolv_ctx *ctx = (struct resolv_ctx *)data;
    int io_active          = ev_is_active(&ctx->io);

    if (read || write) {
        if (io_active && ctx->io.fd != s) {
            ev_io_stop(default_loop, &ctx->io);
        }
        ev_io_set(&ctx->io, s, (read ? EV_READ : 0) | (write ? EV_WRITE : 0));
        ev_io_start(default_loop, &ctx->io);
    } else {
        ev_io_stop(default_loop, &ctx->io);
        ev_io_set(&ctx->io, -1, 0);
    }
}
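Note: this is the same c-ares socket-state callback shape as Example #6, but multiplexed over a single ev_io: the watcher is stopped before ev_io_set() whenever the file descriptor changes, since only an inactive watcher may be reconfigured.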
Example #16
int uv_close(uv_handle_t* handle) {
    switch (handle->type) {
    case UV_TCP:
        ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
        ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
        break;

    case UV_PREPARE:
        uv_prepare_stop(handle);
        break;

    case UV_CHECK:
        uv_check_stop(handle);
        break;

    case UV_IDLE:
        uv_idle_stop(handle);
        break;

    case UV_ASYNC:
        ev_async_stop(EV_DEFAULT_ &handle->async_watcher);
        ev_ref(EV_DEFAULT_UC);
        break;

    case UV_TIMER:
        if (ev_is_active(&handle->timer_watcher)) {
            ev_ref(EV_DEFAULT_UC);
        }
        ev_timer_stop(EV_DEFAULT_ &handle->timer_watcher);
        break;

    default:
        assert(0);
        return -1;
    }

    uv_flag_set(handle, UV_CLOSING);

    /* This is used to call the on_close callback in the next loop. */
    ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
    ev_feed_event(EV_DEFAULT_ &handle->next_watcher, EV_IDLE);
    assert(ev_is_pending(&handle->next_watcher));

    return 0;
}
Example #17
void uv__udp_watcher_stop(uv_udp_t* handle, ev_io* w) {
  int flags;

  if (!ev_is_active(w)) {
    return;
  }

  assert(w == &handle->read_watcher
      || w == &handle->write_watcher);

  flags = (w == &handle->read_watcher ? EV_READ : EV_WRITE);

  ev_ref(handle->loop->ev);
  ev_io_stop(handle->loop->ev, w);
  ev_io_set(w, -1, flags);
  ev_set_cb(w, NULL);
  w->data = (void*)0xDEADBABE;
}
Example #18
static void uv__udp_watcher_start(uv_udp_t* handle, ev_io* w) {
  int flags;

  if (ev_is_active(w)) {
    return;
  }

  assert(w == &handle->read_watcher
      || w == &handle->write_watcher);

  flags = (w == &handle->read_watcher ? EV_READ : EV_WRITE);

  w->data = handle;
  ev_set_cb(w, uv__udp_io);
  ev_io_set(w, handle->fd, flags);
  ev_io_start(handle->loop->ev, w);
  ev_unref(handle->loop->ev);
}
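Note: the ev_unref()/ev_ref() calls in these two UDP helpers keep the loop's reference count balanced: an active watcher normally keeps ev_run() from returning, so the start path unrefs to make the watcher "weak", and the stop path refs again just before stopping it, the pairing libev's documentation prescribes for watchers that should not keep the loop alive.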
Example #19
F_NONNULL
static void mon_timeout_cb(struct ev_loop* loop, struct ev_timer* t, const int revents V_UNUSED) {
    dmn_assert(loop); dmn_assert(t);
    dmn_assert(revents == EV_TIMER);

    tcp_events_t* md = t->data;

    dmn_assert(md);
    dmn_assert(md->sock > -1);
    dmn_assert(md->tcp_state == TCP_STATE_CONNECTING);
    dmn_assert(ev_is_active(md->connect_watcher));

    log_debug("plugin_tcp_connect: State poll of %s timed out", md->desc);
    ev_io_stop(loop, md->connect_watcher);
    shutdown(md->sock, SHUT_RDWR);
    close(md->sock);
    md->sock = -1;
    md->tcp_state = TCP_STATE_WAITING;
    gdnsd_mon_state_updater(md->idx, false);
}
Example #20
void fp_write_out(muxConn *mc, char *msg, int msg_len) {
  fp_transport_data *data = mc->transport_data;
  // should not attempt to write to the client before it sends the handshake
  int needed = mc->outBufOffset + mc->outBufToWrite + msg_len + 2; // \r\n
  // grow output buffer if needed
  if (mc->outBufLen < needed) {
    mc->outBufLen = needed * 1.2;
    mc->outBuf = realloc(mc->outBuf, mc->outBufLen);
  }
  int p = mc->outBufOffset + mc->outBufToWrite;
  memcpy(mc->outBuf + p, msg, msg_len);
  memcpy(mc->outBuf + p + msg_len, "\r\n", 2); // write the \r\n the length accounting below expects
  mc->outBufToWrite += msg_len + 2;
  if (!ev_is_active(mc->watcher)) {
# if EV_MULTIPLICITY
    ev_io_start(mc->loop, mc->watcher);
# else
    ev_io_start(mc->watcher);
# endif
  }
}
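Note: this snippet spells out both libev build flavours: with EV_MULTIPLICITY enabled (the default), every start/stop call takes the loop as its first argument; with it disabled, that argument disappears. The EV_A_ and EV_DEFAULT_UC_ macros seen in other examples expand to a loop-plus-comma or to nothing for the same reason.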
Example #21
File: udp.c Project: nuxleus/libuv
int uv_udp_recv_start(uv_udp_t* handle,
                      uv_alloc_cb alloc_cb,
                      uv_udp_recv_cb recv_cb) {
  if (alloc_cb == NULL || recv_cb == NULL) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
    return -1;
  }

  if (ev_is_active(&handle->read_watcher)) {
    uv__set_artificial_error(handle->loop, UV_EALREADY);
    return -1;
  }

  if (uv__udp_maybe_deferred_bind(handle, AF_INET))
    return -1;

  handle->alloc_cb = alloc_cb;
  handle->recv_cb = recv_cb;
  uv__udp_start_read_watcher(handle);

  return 0;
}
Example #22
static void
watch_toggle(DBusWatch *watch, void *data)
{
	struct watch *w;

	(void)data;

	lem_debug("watch = %p, fd = %d, flags = %s, enabled = %s",
	          (void *)watch,
		  dbus_watch_get_unix_fd(watch),
	          dbus_watch_get_flags(watch) & DBUS_WATCH_READABLE ? "READ" : "WRITE",
		  dbus_watch_get_enabled(watch) ? "true" : "false");

	w = dbus_watch_get_data(watch);
	if (dbus_watch_get_enabled(watch)) {
		if (ev_is_active(&w->ev))
			ev_io_stop(EV_G_ &w->ev);

		ev_io_set(EV_G_ &w->ev, w->ev.fd,
		          flags_to_revents(dbus_watch_get_flags(watch)));
		ev_io_start(EV_G_ &w->ev);
	} else
		ev_io_stop(EV_G_ &w->ev);
}
Example #23
void
worker_stop(EV_P_ struct worker *worker)
{
    LOGF(3, "=== %d: worker stopped\n", worker_pid(worker));

    if (worker->f_alive) {
        LOGF(1, "=== %d: worker still alive - sending SIGKILL\n",
             worker_pid(worker));
        kill(worker_pid(worker), SIGKILL);
    }
    writeq_uninit(&worker->stdin_writeq);
    writeq_uninit(&worker->msgin_writeq);
    if (ev_is_active(&worker->child_watcher)) {
        ev_child_stop(EV_A_ &worker->child_watcher);
    }
    if (ev_is_active(&worker->stdin_w)) {
        ev_io_stop(EV_A_ &worker->stdin_w);
    }
    if (ev_is_active(&worker->stdout_w)) {
        ev_io_stop(EV_A_ &worker->stdout_w);
    }
    if (ev_is_active(&worker->stderr_w)) {
        ev_io_stop(EV_A_ &worker->stderr_w);
    }
    if (ev_is_active(&worker->msgin_w)) {
        ev_io_stop(EV_A_ &worker->msgin_w);
    }
    if (ev_is_active(&worker->msgout_w)) {
        ev_io_stop(EV_A_ &worker->msgout_w);
    }
    close(worker->stdin_w.fd);
    close(worker->stdout_w.fd);
    close(worker->stderr_w.fd);
    close(worker->msgin_w.fd);
    close(worker->msgout_w.fd);
    free(worker);
}
Example #24
File: core.c Project: Maxence/node
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  uv_async_t* async;
  uv_stream_t* stream;
  uv_process_t* process;

  handle->close_cb = close_cb;

  switch (handle->type) {
    case UV_NAMED_PIPE:
      uv_pipe_cleanup((uv_pipe_t*)handle);
      /* Fall through. */

    case UV_TTY:
    case UV_TCP:
      stream = (uv_stream_t*)handle;

      uv_read_stop(stream);
      ev_io_stop(stream->loop->ev, &stream->write_watcher);

      uv__close(stream->fd);
      stream->fd = -1;

      if (stream->accepted_fd >= 0) {
        uv__close(stream->accepted_fd);
        stream->accepted_fd = -1;
      }

      assert(!ev_is_active(&stream->read_watcher));
      assert(!ev_is_active(&stream->write_watcher));
      break;

    case UV_UDP:
      uv__udp_start_close((uv_udp_t*)handle);
      break;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*) handle);
      break;

    case UV_CHECK:
      uv_check_stop((uv_check_t*) handle);
      break;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*) handle);
      break;

    case UV_ASYNC:
      async = (uv_async_t*)handle;
      ev_async_stop(async->loop->ev, &async->async_watcher);
      ev_ref(async->loop->ev);
      break;

    case UV_TIMER:
      uv_timer_stop((uv_timer_t*)handle);
      break;

    case UV_PROCESS:
      process = (uv_process_t*)handle;
      ev_child_stop(process->loop->ev, &process->child_watcher);
      break;

    case UV_FS_EVENT:
      uv__fs_event_destroy((uv_fs_event_t*)handle);
      break;

    default:
      assert(0);
  }

  handle->flags |= UV_CLOSING;

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(handle->loop->ev, &handle->next_watcher);
  ev_feed_event(handle->loop->ev, &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));
}
Example #25
File: core.c Project: Maxence/node
void uv__finish_close(uv_handle_t* handle) {
  uv_loop_t* loop = handle->loop;

  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
      assert(!ev_is_active(&((uv_prepare_t*)handle)->prepare_watcher));
      break;

    case UV_CHECK:
      assert(!ev_is_active(&((uv_check_t*)handle)->check_watcher));
      break;

    case UV_IDLE:
      assert(!ev_is_active(&((uv_idle_t*)handle)->idle_watcher));
      break;

    case UV_ASYNC:
      assert(!ev_is_active(&((uv_async_t*)handle)->async_watcher));
      break;

    case UV_TIMER:
      assert(!ev_is_active(&((uv_timer_t*)handle)->timer_watcher));
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      assert(!ev_is_active(&((uv_stream_t*)handle)->read_watcher));
      assert(!ev_is_active(&((uv_stream_t*)handle)->write_watcher));
      assert(((uv_stream_t*)handle)->fd == -1);
      uv__stream_destroy((uv_stream_t*)handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*)handle);
      break;

    case UV_PROCESS:
      assert(!ev_is_active(&((uv_process_t*)handle)->child_watcher));
      break;

    case UV_FS_EVENT:
      break;

    default:
      assert(0);
      break;
  }

  ev_idle_stop(loop->ev, &handle->next_watcher);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }

  ev_unref(loop->ev);
}
Example #26
int uv__io_active(uv__io_t* handle) {
  return ev_is_active(&handle->io_watcher);
}
Example #27
F_NONNULL
static void mon_interval_cb(struct ev_loop* loop, struct ev_timer* t, const int revents V_UNUSED) {
    dmn_assert(loop); dmn_assert(t);
    dmn_assert(revents == EV_TIMER);

    tcp_events_t* md = t->data;

    dmn_assert(md);

    if(md->tcp_state != TCP_STATE_WAITING) {
        log_warn("plugin_tcp_connect: A monitoring request attempt seems to have "
            "lasted longer than the monitoring interval. "
            "Skipping this round of monitoring - are you "
            "starved for CPU time?");
        return;
    }

    dmn_assert(md->sock == -1);
    dmn_assert(!ev_is_active(md->connect_watcher));
    dmn_assert(!ev_is_active(md->timeout_watcher) && !ev_is_pending(md->timeout_watcher));

    log_debug("plugin_tcp_connect: Starting state poll of %s", md->desc);

    const bool isv6 = md->addr.sa.sa_family == AF_INET6;

    const int sock = socket(isv6 ? PF_INET6 : PF_INET, SOCK_STREAM, gdnsd_getproto_tcp());
    if(sock == -1) {
        log_err("plugin_tcp_connect: Failed to create monitoring socket: %s", dmn_logf_errno());
        return;
    }

    if(fcntl(sock, F_SETFL, (fcntl(sock, F_GETFL, 0)) | O_NONBLOCK) == -1) {
        log_err("plugin_tcp_connect: Failed to set O_NONBLOCK on monitoring socket: %s", dmn_logf_errno());
        close(sock);
        return;
    }

    bool success = false;
    if(likely(connect(sock, &md->addr.sa, md->addr.len) == -1)) {
        switch(errno) {
            case EINPROGRESS:
                // this is the normal case, where nonblock connect
                //   wants us to wait for writability...
                md->sock = sock;
                md->tcp_state = TCP_STATE_CONNECTING;
                ev_io_set(md->connect_watcher, sock, EV_WRITE);
                ev_io_start(loop, md->connect_watcher);
                ev_timer_set(md->timeout_watcher, md->tcp_svc->timeout, 0);
                ev_timer_start(loop, md->timeout_watcher);
                return; // don't do socket/status finishing actions below...
                break; // redundant
            case EPIPE:
            case ECONNREFUSED:
            case ETIMEDOUT:
            case EHOSTUNREACH:
            case EHOSTDOWN:
            case ENETUNREACH:
                // fast remote failures, e.g. when remote is local, I hope
                log_debug("plugin_tcp_connect: State poll of %s failed very quickly", md->desc);
                break;
            default:
                log_err("plugin_tcp_connect: Failed to connect() monitoring socket to remote server, possible local problem: %s", dmn_logf_errno());
        }
    }
    else {
        success = true;
    }

    close(sock);
    gdnsd_mon_state_updater(md->idx, success);
}
Example #28
F_NONNULL
static void mon_write_cb(struct ev_loop* loop, struct ev_io* io, const int revents V_UNUSED) {
    dmn_assert(loop); dmn_assert(io);
    dmn_assert(revents == EV_WRITE);

    http_events_t* md = (http_events_t*)io->data;

    dmn_assert(md);
    dmn_assert(md->hstate == HTTP_STATE_WRITING);
    dmn_assert(!ev_is_active(md->read_watcher));
    dmn_assert(ev_is_active(md->write_watcher));
    dmn_assert(ev_is_active(md->timeout_watcher));
    dmn_assert(md->sock > -1);

    int sock = md->sock;
    if(likely(!md->already_connected)) {
        // nonblocking connect() just finished, need to check status
        int so_error = 0;
        unsigned int so_error_len = sizeof(so_error);
        (void)getsockopt(sock, SOL_SOCKET, SO_ERROR, &so_error, &so_error_len);
        if(unlikely(so_error)) {
            switch(so_error) {
                case EPIPE:
                case ECONNREFUSED:
                case ETIMEDOUT:
                case EHOSTUNREACH:
                case EHOSTDOWN:
                case ENETUNREACH:
                    break;
                default:
                    log_err("plugin_http_status: Failed to connect() monitoring socket to remote server, possible local problem: %s", logf_errnum(so_error));
            }

            log_debug("plugin_http_status: State poll of %s failed quickly: %s", md->smgr->desc, logf_errnum(so_error));
            close(sock); md->sock = -1;
            ev_io_stop(loop, md->write_watcher);
            ev_timer_stop(loop, md->timeout_watcher);
            md->hstate = HTTP_STATE_WAITING;
            gdnsd_mon_state_updater(md->smgr, false);
            return;
        }
        md->already_connected = true;
    }

    const unsigned to_send = md->http_svc->req_data_len - md->done;
    const int sent = send(sock, md->http_svc->req_data + md->done, to_send, 0);
    if(unlikely(sent == -1)) {
        switch(errno) {
            case EAGAIN:
            case EINTR:
                return;
            case ENOTCONN:
            case ECONNRESET:
            case ETIMEDOUT:
            case EHOSTUNREACH:
            case ENETUNREACH:
            case EPIPE:
                break;
            default:
                log_err("plugin_http_status: write() to monitoring socket failed, possible local problem: %s", logf_errno());
        }
        shutdown(sock, SHUT_RDWR);
        close(sock);
        md->sock = -1;
        ev_io_stop(loop, md->write_watcher);
        ev_timer_stop(loop, md->timeout_watcher);
        md->hstate = HTTP_STATE_WAITING;
        gdnsd_mon_state_updater(md->smgr, false);
        return;
    }
    if(unlikely(sent != (signed)to_send)) {
        md->done += sent;
        return;
    }

    md->done = 0;
    md->hstate = HTTP_STATE_READING;
    ev_io_stop(loop, md->write_watcher);
    ev_io_set(md->read_watcher, sock, EV_READ);
    ev_io_start(loop, md->read_watcher);
}
Example #29
F_NONNULL
static void mon_interval_cb(struct ev_loop* loop, struct ev_timer* t, const int revents V_UNUSED) {
    dmn_assert(loop); dmn_assert(t);
    dmn_assert(revents == EV_TIMER);

    http_events_t* md = (http_events_t*)t->data;

    dmn_assert(md);

    if(unlikely(md->hstate != HTTP_STATE_WAITING)) {
        log_warn("plugin_http_status: A monitoring request attempt seems to have "
            "lasted longer than the monitoring interval. "
            "Skipping this round of monitoring - are you "
            "starved for CPU time?");
        return;
    }

    dmn_assert(md->sock == -1);
    dmn_assert(!ev_is_active(md->read_watcher));
    dmn_assert(!ev_is_active(md->write_watcher));
    dmn_assert(!ev_is_active(md->timeout_watcher));

    log_debug("plugin_http_status: Starting state poll of %s", md->smgr->desc);

    do {
        const bool isv6 = md->addr.sa.sa_family == AF_INET6;

        const int sock = socket(isv6 ? PF_INET6 : PF_INET, SOCK_STREAM, gdnsd_getproto_tcp());
        if(unlikely(sock < 0)) {
            log_err("plugin_http_status: Failed to create monitoring socket: %s", logf_errno());
            break;
        }

        if(unlikely(fcntl(sock, F_SETFL, (fcntl(sock, F_GETFL, 0)) | O_NONBLOCK) == -1)) {
            log_err("plugin_http_status: Failed to set O_NONBLOCK on monitoring socket: %s", logf_errno());
            close(sock);
            break;
        }

        md->already_connected = true;
        if(likely(connect(sock, &md->addr.sa, md->addr.len) == -1)) {
            if(likely(errno == EINPROGRESS)) { md->already_connected = false; }
            else {
                switch(errno) {
                    case EPIPE:
                    case ECONNREFUSED:
                    case ETIMEDOUT:
                    case EHOSTUNREACH:
                    case EHOSTDOWN:
                    case ENETUNREACH:
                        break;
                    default:
                        log_err("plugin_http_status: Failed to connect() monitoring socket to remote server, possible local problem: %s", logf_errno());
                }
                close(sock);
                break;
            }
        }

        md->sock = sock;
        md->hstate = HTTP_STATE_WRITING;
        md->done = 0;
        ev_io_set(md->write_watcher, sock, EV_WRITE);
        ev_io_start(loop, md->write_watcher);
        ev_timer_set(md->timeout_watcher, md->http_svc->timeout, 0);
        ev_timer_start(loop, md->timeout_watcher);
        return;
    } while(0);

    // This is only reachable via "break"'s above, which indicate an immediate failure
    log_debug("plugin_http_status: State poll of %s failed very quickly", md->smgr->desc);
    md->hstate = HTTP_STATE_WAITING;
    gdnsd_mon_state_updater(md->smgr, false);
}
Example #30
void uv__write(uv_handle_t* handle) {
    assert(handle->fd >= 0);

    /* TODO: should probably while(1) here until EAGAIN */

    /* Get the request at the head of the queue. */
    uv_req_t* req = uv_write_queue_head(handle);
    if (!req) {
        assert(handle->write_queue_size == 0);
        uv__drain(handle);
        return;
    }

    assert(req->handle == handle);

    /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
     * because Windows's WSABUF is not an iovec.
     */
    assert(sizeof(uv_buf_t) == sizeof(struct iovec));
    struct iovec* iov = (struct iovec*) &(req->bufs[req->write_index]);
    int iovcnt = req->bufcnt - req->write_index;

    /* Now do the actual writev. Note that we've been updating the pointers
     * inside the iov each time we write. So there is no need to offset it.
     */

    ssize_t n = writev(handle->fd, iov, iovcnt);

    uv_write_cb cb = req->cb;

    if (n < 0) {
        if (errno != EAGAIN) {
            uv_err_t err = uv_err_new(handle, errno);

            /* XXX How do we handle the error? Need test coverage here. */
            uv_close(handle);

            if (cb) {
                cb(req, -1);
            }
            return;
        }
    } else {
        /* Successful write */

        /* The loop updates the counters. */
        while (n > 0) {
            uv_buf_t* buf = &(req->bufs[req->write_index]);
            size_t len = buf->len;

            assert(req->write_index < req->bufcnt);

            if (n < len) {
                buf->base += n;
                buf->len -= n;
                handle->write_queue_size -= n;
                n = 0;

                /* There is more to write. Break and ensure the watcher is pending. */
                break;

            } else {
                /* Finished writing the buf at index req->write_index. */
                req->write_index++;

                assert(n >= len);
                n -= len;

                assert(handle->write_queue_size >= len);
                handle->write_queue_size -= len;

                if (req->write_index == req->bufcnt) {
                    /* Then we're done! */
                    assert(n == 0);

                    /* Pop the req off handle->write_queue. */
                    ngx_queue_remove(&req->queue);
                    free(req->bufs); /* FIXME: we should not be allocing for each read */
                    req->bufs = NULL;

                    /* NOTE: call callback AFTER freeing the request data. */
                    if (cb) {
                        cb(req, 0);
                    }

                    if (!ngx_queue_empty(&handle->write_queue)) {
                        assert(handle->write_queue_size > 0);
                    } else {
                        /* Write queue drained. */
                        uv__drain(handle);
                    }

                    return;
                }
            }
        }
    }

    /* Either we've counted n down to zero or we've got EAGAIN. */
    assert(n == 0 || n == -1);

    /* We're not done yet. */
    assert(ev_is_active(&handle->write_watcher));
    ev_io_start(EV_DEFAULT_ &handle->write_watcher);
}
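Note: the closing assert-then-start pair is safe because ev_io_start() on an already-active watcher is a no-op; the assert merely documents the invariant that a partial write (or EAGAIN) must leave the write watcher armed so uv__write() runs again.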