Example #1
static void uv__io_rw(struct ev_loop* ev, ev_io* w, int events) {
  union { void* data; uv__io_cb cb; } u;
  uv_loop_t* loop = ev_userdata(ev);
  uv__io_t* handle = container_of(w, uv__io_t, io_watcher);
  u.data = handle->io_watcher.data;
  u.cb(loop, handle, events & (EV_READ|EV_WRITE|EV_ERROR));
}
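Every example on this page relies on the other half of the same pattern: before the loop runs, setup code attaches a pointer with ev_set_userdata(), which the callbacks later recover with ev_userdata(). A minimal, self-contained sketch of that pairing (my_ctx and tick_cb are illustrative names, not taken from any of the projects quoted here):

#include <ev.h>

struct my_ctx { int ticks; };                 /* hypothetical per-loop context */

static void tick_cb(struct ev_loop *loop, ev_timer *w, int revents) {
    struct my_ctx *ctx = ev_userdata(loop);   /* recover what main() attached */
    ctx->ticks++;
}

int main(void) {
    struct ev_loop *loop = EV_DEFAULT;
    struct my_ctx ctx = { 0 };
    ev_set_userdata(loop, &ctx);              /* make ctx visible to every callback */

    ev_timer timer;
    ev_timer_init(&timer, tick_cb, 0.1, 0.);  /* one-shot timer, fires after 100 ms */
    ev_timer_start(loop, &timer);

    ev_run(loop, 0);                          /* returns once no active watchers remain */
    return 0;
}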
Example #2
static void
ev_io_on_request(struct ev_loop* mainloop, ev_io* watcher, const int events)
{
  int client_fd;
  struct sockaddr_in sockaddr;
  socklen_t addrlen;

  addrlen = sizeof(struct sockaddr_in);
  client_fd = accept(watcher->fd, (struct sockaddr*)&sockaddr, &addrlen);
  if(client_fd < 0) {
    DBG("Could not accept() client: errno %d", errno);
    return;
  }

  int flags = fcntl(client_fd, F_GETFL, 0);
  if(fcntl(client_fd, F_SETFL, (flags < 0 ? 0 : flags) | O_NONBLOCK) == -1) {
    DBG("Could not set_nonblocking() client %d: errno %d", client_fd, errno);
    return;
  }

  Request* request = Request_new(
    ((ThreadInfo*)ev_userdata(mainloop))->server_info,
    client_fd,
    inet_ntoa(sockaddr.sin_addr)
  );

  DBG_REQ(request, "Accepted client %s:%d on fd %d",
          inet_ntoa(sockaddr.sin_addr), ntohs(sockaddr.sin_port), client_fd);

  ev_io_init(&request->ev_watcher, &ev_io_on_read,
             client_fd, EV_READ);
  ev_io_start(mainloop, &request->ev_watcher);
}
Example #3
static void fd_cb (struct ev_loop *loop, ev_io *iow, int revents)
{
    struct flux_watcher *w = iow->data;
    assert (w->signature == FD_SIG);
    if (w->fn)
        w->fn (ev_userdata (loop), w, libev_to_events (revents), w->arg);
}
Example #4
static void
S5_server_cb (EV_P_ ev_io *ev, int revents)
{
    struct sockaddr_in sin;
    int lv;
    struct S5srv_t *root;
    struct S5tun_t *tun;
    ev_io *eve;
    socklen_t sin_sz;
    /* get root node from ev_loop */
    root = ev_userdata (EV_A);
    /* accept socket */
    sin_sz = sizeof (struct sockaddr_in);
    lv = accept (root->fd, (struct sockaddr*)&sin, &sin_sz);
    if (lv == -1)
    {
        perror ("accept");
        return;
    }
    /* create new node */
    tun = S5_get_tun (root, -1);
    if (!tun)
    {
        close (lv);
        return;
    }
    /* update state && s5-fd */
    tun->state ++;
    tun->_s[0].fd = lv;
    /* create event's bindings */
    eve = &(tun->_s[0].evio);
    ev_io_init (eve, clipair_dispatch_cb, tun->_s[0].fd, EV_READ);
    ev_io_start (EV_A_ eve);
}
Example #5
static void stat_cb (struct ev_loop *loop, ev_stat *sw, int revents)
{
    struct flux_watcher *w = sw->data;
    assert (w->signature == STAT_SIG);
    if (w->fn)
        w->fn (ev_userdata (loop), w, libev_to_events (revents), w->arg);
}
Example #6
/**
 * Invoked to handle async notifications via the thread pipes
 */
static void handle_worker_notification(ev_loop *lp, ev_io *watcher, int ready_events) {
    // Get the user data
    worker_ev_userdata *data = ev_userdata(lp);

    // Attempt to read a single character from the pipe
    char cmd;
    if (read(data->pipefd[0], &cmd, 1) != 1)
        return;

    // Handle the command
    conn_info *conn;
    switch (cmd) {
        // Accept new connection
        case 'a':
            // Read the address of conn from the pipe
            if (read(data->pipefd[0], &conn, sizeof(conn_info*)) < 0) {
                perror("Failed to read from async pipe");
                return;
            }

            // Schedule this connection on this thread
            conn->thread_ev = data;
            ev_io_start(data->loop, &conn->client);
            break;

        // Quit
        case 'q':
            ev_break(lp, EVBREAK_ALL);
            break;

        default:
            syslog(LOG_WARNING, "Received unknown command: %c", cmd);
    }
}
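For context, the sending side of the pipe protocol handled above would look roughly like the sketch below: one command byte, optionally followed by a raw pointer. notify_target and notify_worker are placeholder names, not taken from the original project.

#include <unistd.h>

/* Illustrative sender for the pipe protocol above: 'a' plus a connection pointer
 * schedules a connection, 'q' asks the worker to quit. */
struct notify_target { int pipefd[2]; };      /* stand-in for the worker's userdata */

static int notify_worker(struct notify_target *t, char cmd, void *ptr) {
    if (write(t->pipefd[1], &cmd, 1) != 1)
        return -1;                            /* pipe closed or full */
    if (ptr && write(t->pipefd[1], &ptr, sizeof ptr) != (ssize_t)sizeof ptr)
        return -1;                            /* &ptr: the pointer value itself is sent */
    return 0;
}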
Example #7
static void idle_cb (struct ev_loop *loop, ev_idle *iw, int revents)
{
    struct flux_watcher *w = iw->data;
    assert (w->signature == IDLE_SIG);
    if (w->fn)
        w->fn (ev_userdata (loop), w, libev_to_events (revents), w->arg);
}
Example #8
static void child_cb (struct ev_loop *loop, ev_child *cw, int revents)
{
    struct flux_watcher *w = cw->data;
    assert (w->signature == CHILD_SIG);
    if (w->fn)
        w->fn (ev_userdata (loop), w, libev_to_events (revents), w->arg);
}
Example #9
static void prepare_cb (struct ev_loop *loop, ev_prepare *pw, int revents)
{
    struct flux_watcher *w = pw->data;
    assert (w->signature == PREPARE_SIG);
    if (w->fn)
        w->fn (ev_userdata (loop), w, libev_to_events (revents), w->arg);
}
Example #10
static void periodic_cb (struct ev_loop *loop, ev_periodic *pw, int revents)
{
    struct f_periodic *fp = pw->data;
    struct flux_watcher *w = fp->w;
    if (w->fn)
        fp->w->fn (ev_userdata (loop), w, libev_to_events (revents), w->arg);
}
Example #11
static void zmq_cb (struct ev_loop *loop, ev_zmq *pw, int revents)
{
    struct flux_watcher *w = pw->data;
    assert (w->signature == ZMQ_SIG);
    if (w->fn)
        w->fn (ev_userdata (loop), w, libev_to_events (revents), w->arg);
}
Example #12
/**
 * Called when a message is sent to netconf->loop_async.
 * This is usually to signal that some internal control
 * flow related to the event loop needs to take place.
 * For example, we might need to re-enable some ev_io* watchers,
 * or exit the loop.
 */
static void handle_async_event(ev_async *watcher, int revents) {
    // Get the user data
    worker_ev_userdata *data = ev_userdata();

    // Get a reference to the head, set the head to NULL
    async_event *event = (async_event*)data->netconf->events;
    data->netconf->events = NULL;

    async_event *next;
    while (event) {
        // Handle based on the event
        switch (event->event_type) {
            case EXIT:
                ev_break(EVBREAK_ALL);
                break;

            case SCHEDULE_WATCHER:
                ev_io_start(event->watcher);
                break;

            default:
                syslog(LOG_ERR, "Unknown async event type!");
                break;
        }

        // Grab the next event, free this one, and repeat
        next = event->next;
        free(event);
        event = next;
    }
}
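The producing side of this pattern is usually another thread that pushes an event onto the shared list and then wakes the loop with ev_async_send(). A hedged sketch, with queued_event and post_event standing in for the project's async_event machinery and without the locking a real implementation would need:

#include <ev.h>

struct queued_event { struct queued_event *next; int type; };

static void post_event(struct ev_loop *loop, ev_async *wake,
                       struct queued_event **head, struct queued_event *ev) {
    ev->next = *head;            /* real code would synchronize access to the list */
    *head = ev;
    ev_async_send(loop, wake);   /* thread-safe way to wake the target loop */
}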
Example #13
static void
clipair_dispatch_cb (EV_P_ ev_io *ev, int revents)
{
    struct S5srv_t *root;
    struct S5tun_t *self;
    root = ev_userdata (EV_A);
    if (!root)
        return;
    self = S5_get_tun (root, ev->fd);
    if (!self)
    {
        /* TODO? exception try to remove from event list */
        return;
    }
    if (ev->fd == self->_s[0].fd)
    {
        /* execute code */
        if (revents & EV_READ)
            clipair_S5_rd_cb (EV_A_ ev, self);
        if (revents & EV_WRITE)
            clipair_wr_cb (EV_A_ ev, self, 0);
    }
    else if (ev->fd == self->_s[1].fd)
    {
        if (revents & EV_READ)
            clipair_DC_rd_cb (EV_A_ ev, self);
        if (revents & EV_WRITE)
            clipair_wr_cb (EV_A_ ev, self, 1);
    }
    else
    {
        /* TODO: remove from event list */
    }
}
Example #14
void on_connect(EV_P_ struct ev_io *io, int revents) {
    while (1) {
        struct sockaddr_in client_addr;
        socklen_t len = sizeof (struct sockaddr_in);
        int client_sock = accept(io->fd, (struct sockaddr *) &client_addr, &len);
        if (client_sock >= 0) {

            if (set_nonblock(client_sock) == -1) {
                shutdown_printerr(client_sock, "can't set the socket mode O_NONBLOCK for client\n");
                return;
            }

            if (set_linger(client_sock) == -1) {
                shutdown_printerr(client_sock, "can't set SO_LINGER sock option for client\n");
                return;
            }

            if (set_keepalive(client_sock) == -1) {
                shutdown_printerr(client_sock, "can't set SO_KEEPALIVE sock option for client\n");
                return;
            }

            server_ctx_t *srv_ctx = (server_ctx_t *) ev_userdata(loop);
            client_ctx_t* cli_ctx = get_client_ctx(srv_ctx);
            cli_ctx->io.ctx = cli_ctx;
            cli_ctx->connected_at = time(NULL);
            uuid_generate(cli_ctx->uuid);
            memcpy(&cli_ctx->client_addr, &client_addr, sizeof (struct sockaddr_in));
            ev_io_init((ev_io *) & cli_ctx->io, client_read_write, client_sock, EV_READ);
            ev_io_start(loop, (ev_io *) & cli_ctx->io);
            char time_buff[32];
            strftime(time_buff, sizeof (time_buff), "%Y-%m-%d %H:%M:%S %Z", localtime(&cli_ctx->connected_at));
            char *addr = inet_ntoa(cli_ctx->client_addr.sin_addr);
            char uuid_buff[37];
            uuid_unparse_lower(cli_ctx->uuid, uuid_buff);
            printf("client accepted %s:%hu %s at %s\n", addr, ntohs(client_addr.sin_port), uuid_buff, time_buff);
            char *welcome_msg = server_welcome(srv_ctx, cli_ctx);
            send_message(loop, cli_ctx->uuid, welcome_msg, strlen(welcome_msg));
            free(welcome_msg);
            char *new_client_msg = server_client_connected(cli_ctx);
            for (ssize_t i = 0; i < srv_ctx->clients_count; i++) {
                if (uuid_compare(srv_ctx->clients[i]->uuid, cli_ctx->uuid) != 0) {
                    send_message(loop, srv_ctx->clients[i]->uuid, new_client_msg, strlen(new_client_msg));
                }
            }
            free(new_client_msg);
        } else {
            if (errno == EAGAIN)
                return;
            if (errno == EMFILE || errno == ENFILE) {
                fprintf(stderr, "out of file descriptors\n");
                return;
            } else if (errno != EINTR) {
                fprintf(stderr, "accept connections error\n");
                return;
            }
        }
    }
}
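The set_nonblock() helper called above is not shown on this page; a plausible implementation using fcntl() would be:

#include <fcntl.h>

/* Plausible sketch of the set_nonblock() helper used above (the original is
 * not shown here); returns 0 on success, -1 on error. */
static int set_nonblock(int fd) {
    int flags = fcntl(fd, F_GETFL, 0);
    if (flags == -1)
        return -1;
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1 ? -1 : 0;
}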
Example #15
void LinkageWorker::writable_cb(struct ev_loop *loop, struct ev_io *w, int /*revents*/)
{
    LinkageWorker *worker = reinterpret_cast<LinkageWorker *>(ev_userdata(loop));
    unsigned char *p = reinterpret_cast<unsigned char *>(w);
    p -= offsetof(struct client_t, ev_io_w);
    struct client_t *client = reinterpret_cast<struct client_t *>(p);
    worker->OnWritable(client);
}
Example #16
void LinkageWorker::timer_cb(struct ev_loop *loop, struct ev_timer *w, int /*revents*/)
{
    LinkageWorker *worker = reinterpret_cast<LinkageWorker *>(ev_userdata(loop));
    unsigned char *p = reinterpret_cast<unsigned char *>(w);
    p -= offsetof(struct timer_t, ev_timer);
    struct timer_t *timer = reinterpret_cast<struct timer_t *>(p);
    worker->OnTimer(timer);
}
Example #17
static void send_callback(EV_P_ ev_io *w, int tev)
{
	blizzard::server *s = (blizzard::server *) ev_userdata(loop);
	blizzard::events *e = memberof(blizzard::events, watcher_send, w);
	blizzard::http *con = e->con;

	con->allow_write();
	s->process(con);
}
Example #18
static void
cb_acquire (ev_loop *ev_loop)
{
    MilterEventLoop *loop;
    MilterLibevEventLoopPrivate *priv;

    loop = ev_userdata(ev_loop);
    priv = MILTER_LIBEV_EVENT_LOOP_GET_PRIVATE(loop);
    priv->acquire_func(loop, priv->release_data);
}
Example #19
static void
ev_signal_on_sigint(struct ev_loop* mainloop, ev_signal* watcher, const int events)
{
  /* Clean up and shut down this thread.
   * (Shuts down the Python interpreter if this is the main thread) */
  ev_cleanup* cleanup_watcher = malloc(sizeof(ev_cleanup));
  ev_cleanup_init(cleanup_watcher, pyerr_set_interrupt);
  ev_cleanup_start(mainloop, cleanup_watcher);

  ev_io_stop(mainloop, &((ThreadInfo*)ev_userdata(mainloop))->accept_watcher);
  ev_signal_stop(mainloop, watcher);
}
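A handler like this is registered on an ev_signal watcher before the loop starts; a minimal sketch of that registration (watch_sigint and sigint_watcher are illustrative names):

#include <ev.h>
#include <signal.h>

static void watch_sigint(struct ev_loop *mainloop, ev_signal *sigint_watcher) {
    ev_signal_init(sigint_watcher, ev_signal_on_sigint, SIGINT);
    ev_signal_start(mainloop, sigint_watcher);   /* watcher must stay valid while started */
}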
Example #20
/**
 * Invoked when a UDP connection has a message ready to be read.
 * We need to take care to add the data to our buffers, and then
 * invoke the connection handlers, which contain the business
 * logic for what to do next.
 */
static void handle_udp_message(ev_io *watch, int ready_events) {
    while (1) {
        // Get the associated connection struct
        conn_info *conn = watch->data;

        // Clear the input buffer
        circbuf_clear(&conn->input);

        // Build the IO vectors to perform the read
        struct iovec vectors[2];
        int num_vectors;
        circbuf_setup_readv_iovec(&conn->input, (struct iovec*)&vectors, &num_vectors);

        /*
         * Issue the read, always using the first vector:
         * since we just cleared the buffer, it should be
         * one contiguous region.
         */
        assert(num_vectors == 1);
        ssize_t read_bytes = recv(watch->fd, vectors[0].iov_base,
                                    vectors[0].iov_len, 0);

        // Make sure we actually read something
        if (read_bytes == 0) {
            syslog(LOG_DEBUG, "Got empty UDP packet. [%d]\n", watch->fd);
            return;

        } else if (read_bytes == -1) {
            if (errno != EAGAIN && errno != EINTR) {
                syslog(LOG_ERR, "Failed to recv() from connection [%d]! %s.",
                        watch->fd, strerror(errno));
            }
            return;
        }

        // Update the write cursor
        circbuf_advance_write(&conn->input, read_bytes);

        // UDP clients don't need to append newlines to the messages like
        // TCP clients do, but our parser requires them.  Append one if
        // it's not present.
        if (conn->input.buffer[conn->input.write_cursor - 1] != '\n')
            circbuf_write(&conn->input, "\n", 1);

        // Get the user data
        worker_ev_userdata *data = ev_userdata();

        // Invoke the connection handler
        statsite_conn_handler handle = {data->netconf->config, watch->data};
        handle_client_connect(&handle);
    }
}
Example #21
/**
 * Invoked periodically to give the connection handlers
 * time to clean up and handle state updates
 */
static void handle_periodic_timeout(ev_loop *lp, ev_timer *t, int ready_events) {
    // Get the user data
    worker_ev_userdata *data = ev_userdata(lp);

    // Prepare to invoke the handler
    bloom_conn_handler handle;
    handle.config = data->netconf->config;
    handle.mgr = data->netconf->mgr;
    handle.conn = NULL;

    // Invoke the connection handler layer
    periodic_update(&handle);
}
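A repeating timer like this one is typically wired up as shown below; the interval and the per-worker data pointer are illustrative, not taken from the source.

#include <ev.h>

static void start_periodic_timer(struct ev_loop *loop, ev_timer *t,
                                 void *worker_data, double interval_sec) {
    ev_set_userdata(loop, worker_data);     /* what ev_userdata(lp) returns above */
    ev_timer_init(t, handle_periodic_timeout, interval_sec, interval_sec);
    ev_timer_start(loop, t);                /* fires every interval_sec seconds */
}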
Example #22
/**
 * Called when an event is ready to be processed by libev.
 * We need to do _very_ little work here. Basically just
 * set up the userdata to process the event and return.
 */
static void prepare_event(ev_io *watcher, int revents) {
    // Get the user data
    worker_ev_userdata *data = ev_userdata();

    // Set everything if we don't have a watcher
    if (!data->watcher) {
        data->watcher = watcher;
        data->ready_events = revents;

        // Stop listening for now
        ev_io_stop(watcher);
    }
}
Example #23
template <class ReplyT>
void Redox::submitCommandCallback(struct ev_loop *loop, ev_timer *timer, int revents) {

  Redox *rdx = (Redox *)ev_userdata(loop);
  long id = (long)timer->data;

  Command<ReplyT> *c = rdx->findCommand<ReplyT>(id);
  if (c == nullptr) {
    rdx->logger_.error() << "Couldn't find Command " << id
                         << " in command_map (submitCommandCallback).";
    return;
  }

  submitToServer<ReplyT>(c);
}
Example #24
static void uv__ares_io(struct ev_loop* ev, struct ev_io* watcher,
    int revents) {
  uv_loop_t* loop = ev_userdata(ev);

  assert(ev == loop->ev);

  /* Reset the idle timer */
  uv_timer_again(&loop->timer);

  /* Process DNS responses */
  ares_process_fd(loop->channel,
      revents & EV_READ ? watcher->fd : ARES_SOCKET_BAD,
      revents & EV_WRITE ? watcher->fd : ARES_SOCKET_BAD);
}
Example #25
/* loop pending callback */
static void
callback_Loop(ev_loop *loop)
{
    PyGILState_STATE gstate = PyGILState_Ensure();
    Loop *self = ev_userdata(loop);
    PyObject *result;

    result = PyObject_CallFunctionObjArgs(self->callback, self, NULL);
    if (!result) {
        PYEV_EXIT_LOOP(loop);
    }
    else {
        Py_DECREF(result);
    }
    PyGILState_Release(gstate);
}
Example #26
/**
 * Reads the thread-specific userdata to figure out what
 * we need to handle. Things that purely affect the network
 * stack should be handled here, but otherwise we should defer
 * to the connection handlers.
 */
static void invoke_event_handler(ev_io *watcher, int ready_events) {
    // Get the user data
    worker_ev_userdata *data = ev_userdata();

    // Read in the data, and close on issues
    conn_info *conn = watcher->data;
    if (read_client_data(conn)) {
        close_client_connection(conn);
        return;
    }

    // Invoke the connection handler, and close connection on error
    statsite_conn_handler handle = {data->netconf->config, watcher->data};
    if (handle_client_connect(&handle))
        close_client_connection(conn);
}
Example #27
/**
 * Implements the callback function on all the watcher objects.  This
 * will be indirectly called by the libev event loop implementation.
 *
 * TODO: Custom error handlers?  Currently, any error in a callback
 * will print the error to stderr and things will "go on".
 *
 * [+0, -0, m]
 */
static void watcher_cb(struct ev_loop *loop, void *watcher, int revents) {
    lua_State* L       = ev_userdata(loop);
    void*      objs[3] = { loop, watcher, NULL };
    int        result;

    lua_pushcfunction(L, traceback);

    result = lua_checkstack(L, 5);
    assert(result != 0 /* able to allocate enough space on lua stack */);
    result = push_objs(L, objs);
    assert(result == 2 /* pushed two objects on the lua stack */);
    assert(!lua_isnil(L, -2) /* the loop obj was resolved */);
    assert(!lua_isnil(L, -1) /* the watcher obj was resolved */);

    /* STACK: <traceback>, <loop>, <watcher> */

    if ( !ev_is_active(watcher) ) {
        /* Must remove "stop"ed watcher from loop: */
        loop_stop_watcher(L, -2, -1);
    }

    lua_getfenv(L, -1);
    assert(lua_istable(L, -1) /* The watcher fenv was found */);
    lua_rawgeti(L, -1, WATCHER_FN);
    if ( lua_isnil(L, -1) ) {
        /* The watcher function was set to nil, so do nothing */
        lua_pop(L, 5);
        return;
    }
    assert(lua_isfunction(L, -1) /* watcher function is a function */);

    /* STACK: <traceback>, <loop>, <watcher>, <watcher fenv>, <watcher fn> */

    lua_insert(L, -4);
    lua_pop(L, 1);
    lua_pushinteger(L, revents);

    /* STACK: <traceback>, <watcher fn>, <loop>, <watcher>, <revents> */
    if ( lua_pcall(L, 3, 0, -5) ) {
        /* TODO: Enable user-specified error handler! */
        fprintf(stderr, "CALLBACK FAILED: %s\n",
                lua_tostring(L, -1));
        lua_pop(L, 2);
    } else {
        lua_pop(L, 1);
    }
}
Example #28
void blizzard::http::add_watcher(struct ev_loop *loop)
{
	blizzard::server *s = (blizzard::server *) ev_userdata(loop);

	e.con = this;

	ev_io_init(&e.watcher_recv, recv_callback, fd, EV_READ);
	ev_io_start(loop, &e.watcher_recv);

	ev_io_init(&e.watcher_send, send_callback, fd, EV_WRITE);

	ev_timer_init(&e.watcher_timeout, timeout_callback, 0, s->config.blz.plugin.connection_timeout / (double) 1000);
	ev_timer_again(loop, &e.watcher_timeout);

	server_loop = loop;
	response_time = ev_now(loop);
}
Example #29
static void
Scheduler_Stop(struct ev_loop *loop, ev_prepare *prepare, int revents)
{
    Scheduler *self = prepare->data;
    ev_periodic_stop(loop, (ev_periodic *)((Watcher *)self)->watcher);
    ev_prepare_stop(loop, prepare);
    PyErr_Restore(self->err_type, self->err_value, self->err_traceback);
    if (self->err_fatal) {
        PYEV_LOOP_EXIT(loop);
    }
    else {
        Loop_WarnOrStop(ev_userdata(loop), self->scheduler);
    }
    self->err_fatal = 0;
    self->err_traceback = NULL;
    self->err_value = NULL;
    self->err_type = NULL;
}
Example #30
static void timeout_callback(EV_P_ ev_timer *w, int tev)
{
	blizzard::server *s = (blizzard::server *) ev_userdata(loop);
	blizzard::events *e = memberof(blizzard::events, watcher_timeout, w);
	blizzard::http *con = e->con;

	log_warn("timeout: is_locked=%d, state=%d, fd=%d", con->is_locked(), con->state(), con->get_fd());

	if (!con->is_locked())
	{
		ev_io_stop(loop, &e->watcher_recv);
		ev_io_stop(loop, &e->watcher_send);
		ev_timer_stop(loop, &e->watcher_timeout);

		con->destroy();
		s->http_pool.free(con);
	}
}