Example #1
void event_active (struct event *ev, int res, short ncalls)
{
  dLOOPev;

  if (res & EV_TIMEOUT)
    ev_feed_event (EV_A_ &ev->to, res & EV_TIMEOUT);

  if (res & EV_SIGNAL)
    ev_feed_event (EV_A_ &ev->iosig.sig, res & EV_SIGNAL);

  if (res & (EV_READ | EV_WRITE))
    ev_feed_event (EV_A_ &ev->iosig.io, res & (EV_READ | EV_WRITE));
}
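Every example in this listing boils down to the same pattern: ev_feed_event() marks a watcher as pending with a synthetic revents mask, so its callback runs on the next loop iteration without any real I/O, timeout, or signal having occurred. A minimal, self-contained sketch of that pattern against libev's public API (illustrative only, not taken from any of the projects in this listing):

#include <ev.h>
#include <stdio.h>

static void cb (struct ev_loop *loop, ev_io *w, int revents)
{
  printf ("cb invoked with revents=%d\n", revents);
  ev_io_stop (loop, w); /* no active watchers left, so ev_run returns */
}

int main (void)
{
  struct ev_loop *loop = EV_DEFAULT;
  ev_io watcher;

  ev_io_init (&watcher, cb, 0 /* any fd */, EV_READ);
  ev_io_start (loop, &watcher);

  /* Inject EV_READ by hand; fd 0 does not have to be readable. */
  ev_feed_event (loop, &watcher, EV_READ);

  ev_run (loop, 0);
  return 0;
}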
Example #2
static int
close_writer_asap (evcom_writer *writer)
{
  release_write_buffer(writer);
  ev_feed_event(D_LOOP_(writer) &writer->write_watcher, EV_WRITE);
  return close_asap((evcom_descriptor*)writer);
}
Example #3
static void uv__udp_sendmsg(EV_P_ ev_io* w, int revents) {
  uv_udp_t* handle;

  handle = container_of(w, uv_udp_t, write_watcher);
  assert(handle->type == UV_UDP);
  assert(revents & EV_WRITE);

  assert(!ngx_queue_empty(&handle->write_queue)
      || !ngx_queue_empty(&handle->write_completed_queue));

  /* Write out pending data first. */
  uv__udp_run_pending(handle);

  /* Drain 'request completed' queue. */
  uv__udp_run_completed(handle);

  if (!ngx_queue_empty(&handle->write_completed_queue)) {
    /* Schedule completion callbacks. */
    ev_feed_event(handle->loop->ev, &handle->write_watcher, EV_WRITE);
  }
  else if (ngx_queue_empty(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__udp_stop_write_watcher(handle);
  }
}
Example #4
/**
 * Stops the server. Will not accept new connections.  Does not drop
 * existing connections.
 */
void
evcom_server_close (evcom_server *server)
{
  ev_io_start(D_LOOP_(server) &server->watcher);
  ev_feed_event(D_LOOP_(server) &server->watcher, EV_READ);

  close_asap((evcom_descriptor*)server);
}
Example #5
void
evcom_reader_close (evcom_reader *reader)
{
  ev_io_start(D_LOOP_(reader) &reader->read_watcher);
  ev_feed_event(D_LOOP_(reader) &reader->read_watcher, EV_READ);

  close_asap((evcom_descriptor*)reader);
}
Example #6
/* Stop every watcher registered on fd and feed it EV_ERROR, so each
 * callback still runs and can see that the descriptor is gone. */
static void
fd_kill (struct ev_loop *loop, int fd)
{
    ev_io *w;

    while ((w = (ev_io *)loop->anfds [fd].head))
    {
        ev_io_stop (loop, w);
        ev_feed_event (loop, (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}
Example #7
int uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  uv_tcp_t* tcp;
  uv_async_t* async;
  uv_timer_t* timer;

  handle->close_cb = close_cb;

  switch (handle->type) {
    case UV_TCP:
      tcp = (uv_tcp_t*) handle;
      uv_read_stop((uv_stream_t*)tcp);
      ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);
      break;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*) handle);
      break;

    case UV_CHECK:
      uv_check_stop((uv_check_t*) handle);
      break;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*) handle);
      break;

    case UV_ASYNC:
      async = (uv_async_t*)handle;
      ev_async_stop(EV_DEFAULT_ &async->async_watcher);
      ev_ref(EV_DEFAULT_UC);
      break;

    case UV_TIMER:
      timer = (uv_timer_t*)handle;
      if (ev_is_active(&timer->timer_watcher)) {
        ev_ref(EV_DEFAULT_UC);
      }
      ev_timer_stop(EV_DEFAULT_ &timer->timer_watcher);
      break;

    default:
      assert(0);
      return -1;
  }

  uv_flag_set(handle, UV_CLOSING);

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
  ev_feed_event(EV_DEFAULT_ &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));

  return 0;
}
Example #8
static void pinger_cb(struct ev_loop *loop, struct ev_timer *w, int revents) {

	struct bb_pinger *pinger = (struct bb_pinger *) w;

	struct bb_dealer *bbd = pinger->vhost->dealers;
	// get events before starting a potentially long write session
	ev_feed_event(blastbeat.loop, &blastbeat.event_zmq, EV_READ);
	while(bbd) {
		bb_raw_zmq_send_msg(bbd->identity, bbd->len, "", 0, "ping", 4, "", 0);
		bbd = bbd->next;
	}
}
Example #9
static void pinger_cb(struct ev_loop *loop, struct ev_timer *w, int revents) {

	struct bb_dealer *bbd = blastbeat.dealers;
	// get events before starting a potentially long write session
	ev_feed_event(blastbeat.loop, &blastbeat.event_zmq, EV_READ);
	time_t now = time(NULL);
	while(bbd) {
		if (now - bbd->last_seen > blastbeat.ping_freq) {
			bb_raw_zmq_send_msg(bbd->identity, bbd->len, "", 0, "ping", 4, "", 0);
		}
		bbd = bbd->next;
	}
}
Example #10
static inline void
fd_event_nocheck (struct ev_loop *loop, int fd, int revents)
{
    ANFD *anfd = loop->anfds + fd;
    ev_io *w;

    for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
    {
        int ev = w->events & revents;

        if (ev)
            ev_feed_event (loop, (W)w, ev);
    }
}
Example #11
int bb_socketio_push(struct bb_session *bbs, char type, char *buf, size_t len) {

    char *message = bb_alloc(4 + len);
    if (!message) {
        bb_error("malloc()");
        return -1;
    }

    message[0] = type;
    message[1] = ':';
    message[2] = ':';
    message[3] = ':';

    memcpy(message+4, buf, len);

    if (bbs->sio_realtime) {
        return bb_websocket_reply(bbs, message, len+4);
    }

    struct bb_socketio_message *last_bbsm = NULL, *bbsm = bbs->sio_queue;

    while(bbsm) {
        last_bbsm = bbsm;
        bbsm = bbsm->next;
    }

    bbsm = bb_alloc(sizeof(struct bb_socketio_message));
    if (!bbsm) {
        bb_free(message, len+4);
        bb_error("malloc()");
        return -1;
    }
    memset(bbsm, 0, sizeof(struct bb_socketio_message));
    bbsm->buf = message;
    bbsm->len = 4+len;
    if (last_bbsm) {
        last_bbsm->next = bbsm;
    }
    else {
        bbs->sio_queue = bbsm;
    }

    // is a poller attached to the session?
    if (bbs->sio_poller) {
        ev_feed_event(blastbeat.loop, &bbs->death_timer, EV_TIMER);
    }

    return 0;
}
Example #12
void
evsock_wakeup (struct evsock *sock, int how) {
    if (how & EVSOCK_HOW_RX) {
        sock->rx.suspend = 0;
    }
    if (how & EVSOCK_HOW_TX) {
        sock->tx.suspend = 0;
    }
    EV_IO_RESET(sock->loop, &sock->w, EVSOCK_NEED_EVENTS(sock));
    if (how & EVSOCK_HOW_RX) {
        if (sock->rx.eof || sock->rx.buf.n || (sock->ssl && SSL_pending(sock->ssl))) {
            ev_feed_event(sock->loop, &sock->w, sock->rx.events);
        }
    }
}
Example #13
static int stream_feed(struct stream *s, int events)
{
	struct ev_loop *el;
	if (s->loop == NULL) {
		s->errcode = EINVAL;
		return -EINVAL;
	}
	el = s->loop->stream_loop;

	if (events & EV_READ) {
		ev_feed_event(el, &s->stream_watcher[READ_WATCHER], EV_READ);
	}
	if (events & EV_WRITE) {
		ev_feed_event(el, &s->stream_watcher[WRITE_WATCHER], EV_WRITE);
	}
	/*
	 * TODO: possible for multi-loop.
	 */
#if 0
	ev_async_send(el, &s->loop->bell_watcher);
#endif

	return 0;
}
Example #14
static void uv__write_req_finish(uv_write_t* req) {
  uv_stream_t* stream = req->handle;

  /* Pop the req off tcp->write_queue. */
  ngx_queue_remove(&req->queue);
  if (req->bufs != req->bufsml) {
    free(req->bufs);
  }
  req->bufs = NULL;

  /* Add it to the write_completed_queue where it will have its
   * callback called in the near future.
   */
  ngx_queue_insert_tail(&stream->write_completed_queue, &req->queue);
  ev_feed_event(stream->loop->ev, &stream->write_watcher, EV_WRITE);
}
Example #15
int uv_close(uv_handle_t* handle) {
    switch (handle->type) {
    case UV_TCP:
        ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
        ev_io_stop(EV_DEFAULT_ &handle->read_watcher);
        break;

    case UV_PREPARE:
        uv_prepare_stop(handle);
        break;

    case UV_CHECK:
        uv_check_stop(handle);
        break;

    case UV_IDLE:
        uv_idle_stop(handle);
        break;

    case UV_ASYNC:
        ev_async_stop(EV_DEFAULT_ &handle->async_watcher);
        ev_ref(EV_DEFAULT_UC);
        break;

    case UV_TIMER:
        if (ev_is_active(&handle->timer_watcher)) {
            ev_ref(EV_DEFAULT_UC);
        }
        ev_timer_stop(EV_DEFAULT_ &handle->timer_watcher);
        break;

    default:
        assert(0);
        return -1;
    }

    uv_flag_set(handle, UV_CLOSING);

    /* This is used to call the on_close callback in the next loop. */
    ev_idle_start(EV_DEFAULT_ &handle->next_watcher);
    ev_feed_event(EV_DEFAULT_ &handle->next_watcher, EV_IDLE);
    assert(ev_is_pending(&handle->next_watcher));

    return 0;
}
Example #16
static void uv__udp_sendmsg(uv_udp_t* handle) {
  assert(!ngx_queue_empty(&handle->write_queue)
      || !ngx_queue_empty(&handle->write_completed_queue));

  /* Write out pending data first. */
  uv__udp_run_pending(handle);

  /* Drain 'request completed' queue. */
  uv__udp_run_completed(handle);

  if (!ngx_queue_empty(&handle->write_completed_queue)) {
    /* Schedule completion callbacks. */
    ev_feed_event(handle->loop->ev, &handle->write_watcher, EV_WRITE);
  }
  else if (ngx_queue_empty(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__udp_watcher_stop(handle, &handle->write_watcher);
  }
}
Example #17
void bb_zmq_send_msg(char *identity, size_t identity_len, char *sid, size_t sid_len, char *t, size_t t_len, char *body, size_t body_len) {

	ev_feed_event(blastbeat.loop, &blastbeat.event_zmq, EV_READ);
	bb_raw_zmq_send_msg(identity, identity_len, sid, sid_len, t, t_len, body, body_len);
}
Example #18
int uv_pipe_connect(uv_connect_t* req,
                    uv_pipe_t* handle,
                    const char* name,
                    uv_connect_cb cb) {
  struct sockaddr_un sun;
  int saved_errno;
  int sockfd;
  int status;
  int r;

  saved_errno = errno;
  sockfd = -1;
  status = -1;

  if ((sockfd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
    uv_err_new(handle->loop, errno);
    goto out;
  }

  memset(&sun, 0, sizeof sun);
  uv__strlcpy(sun.sun_path, name, sizeof(sun.sun_path));
  sun.sun_family = AF_UNIX;

  /* We don't check for EINPROGRESS. Think about it: the socket
   * is either there or not.
   */
  do {
    r = connect(sockfd, (struct sockaddr*)&sun, sizeof sun);
  }
  while (r == -1 && errno == EINTR);

  if (r == -1) {
    uv_err_new(handle->loop, errno);
    uv__close(sockfd);
    goto out;
  }

  uv__stream_open((uv_stream_t*)handle, sockfd, UV_READABLE | UV_WRITABLE);

  ev_io_start(handle->loop->ev, &handle->read_watcher);
  ev_io_start(handle->loop->ev, &handle->write_watcher);

  status = 0;

out:
  handle->delayed_error = status; /* Passed to callback. */
  handle->connect_req = req;
  req->handle = (uv_stream_t*)handle;
  req->type = UV_CONNECT;
  req->cb = cb;
  ngx_queue_init(&req->queue);

  /* Run callback on next tick. */
  ev_feed_event(handle->loop->ev, &handle->read_watcher, EV_CUSTOM);
  assert(ev_is_pending(&handle->read_watcher));

  /* Mimic the Windows pipe implementation, always
   * return 0 and let the callback handle errors.
   */
  errno = saved_errno;
  return 0;
}
Example #19
int uv_connect(uv_req_t* req, struct sockaddr* addr) {
    uv_handle_t* handle = req->handle;

    if (handle->fd <= 0) {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) {
            uv_err_new(handle, errno);
            return -1;
        }

        if (uv_tcp_open(handle, fd)) {
            close(fd);
            return -2;
        }
    }

    req->type = UV_CONNECT;
    ngx_queue_init(&req->queue);

    if (handle->connect_req) {
        uv_err_new(handle, EALREADY);
        return -1;
    }

    if (handle->type != UV_TCP) {
        uv_err_new(handle, ENOTSOCK);
        return -1;
    }

    handle->connect_req = req;

    int addrsize = sizeof(struct sockaddr_in);

    int r = connect(handle->fd, addr, addrsize);
    handle->delayed_error = 0;

    if (r != 0 && errno != EINPROGRESS) {
        switch (errno) {
        /* If we get a ECONNREFUSED wait until the next tick to report the
         * error. Solaris wants to report immediately--other unixes want to
         * wait.
         */
        case ECONNREFUSED:
            handle->delayed_error = errno;
            break;

        default:
            uv_err_new(handle, errno);
            return -1;
        }
    }

    assert(handle->write_watcher.data == handle);
    ev_io_start(EV_DEFAULT_ &handle->write_watcher);

    if (handle->delayed_error) {
        ev_feed_event(EV_DEFAULT_ &handle->write_watcher, EV_WRITE);
    }

    return 0;
}
Example #20
int uv__connect(uv_connect_t* req, uv_stream_t* stream, struct sockaddr* addr,
    socklen_t addrlen, uv_connect_cb cb) { 
  int sockfd;
  int r;

  if (stream->fd <= 0) {
    if ((sockfd = uv__socket(addr->sa_family, SOCK_STREAM, 0)) == -1) {
      uv__set_sys_error(stream->loop, errno);
      return -1;
    }

    if (uv__stream_open(stream, sockfd, UV_READABLE | UV_WRITABLE)) {
      uv__close(sockfd);
      return -2;
    }
  }

  uv__req_init(stream->loop, (uv_req_t*)req);
  req->cb = cb;
  req->handle = stream;
  req->type = UV_CONNECT;
  ngx_queue_init(&req->queue);

  if (stream->connect_req) {
    uv__set_sys_error(stream->loop, EALREADY);
    return -1;
  }

  if (stream->type != UV_TCP) {
    uv__set_sys_error(stream->loop, ENOTSOCK);
    return -1;
  }

  stream->connect_req = req;

  do {
    r = connect(stream->fd, addr, addrlen);
  }
  while (r == -1 && errno == EINTR);

  stream->delayed_error = 0;

  if (r != 0 && errno != EINPROGRESS) {
    switch (errno) {
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait.
       */
      case ECONNREFUSED:
        stream->delayed_error = errno;
        break;

      default:
        uv__set_sys_error(stream->loop, errno);
        return -1;
    }
  }

  assert(stream->write_watcher.data == stream);
  ev_io_start(stream->loop->ev, &stream->write_watcher);

  if (stream->delayed_error) {
    ev_feed_event(stream->loop->ev, &stream->write_watcher, EV_WRITE);
  }

  return 0;
}
Example #21
static int uv__connect(uv_connect_t* req, uv_tcp_t* tcp, struct sockaddr* addr,
    socklen_t addrlen, uv_connect_cb cb) {
  int r;

  if (tcp->fd <= 0) {
    int fd = socket(addr->sa_family, SOCK_STREAM, 0);

    if (fd < 0) {
      uv_err_new((uv_handle_t*)tcp, errno);
      return -1;
    }

    if (uv_tcp_open(tcp, fd)) {
      close(fd);
      return -2;
    }
  }

  uv__req_init((uv_req_t*)req);
  req->cb = cb;
  req->handle = (uv_stream_t*)tcp;
  req->type = UV_CONNECT;
  ngx_queue_init(&req->queue);

  if (tcp->connect_req) {
    uv_err_new((uv_handle_t*)tcp, EALREADY);
    return -1;
  }

  if (tcp->type != UV_TCP) {
    uv_err_new((uv_handle_t*)tcp, ENOTSOCK);
    return -1;
  }

  tcp->connect_req = req;

  r = connect(tcp->fd, addr, addrlen);

  tcp->delayed_error = 0;

  if (r != 0 && errno != EINPROGRESS) {
    switch (errno) {
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait.
       */
      case ECONNREFUSED:
        tcp->delayed_error = errno;
        break;

      default:
        uv_err_new((uv_handle_t*)tcp, errno);
        return -1;
    }
  }

  assert(tcp->write_watcher.data == tcp);
  ev_io_start(EV_DEFAULT_ &tcp->write_watcher);

  if (tcp->delayed_error) {
    ev_feed_event(EV_DEFAULT_ &tcp->write_watcher, EV_WRITE);
  }

  return 0;
}
Example #22
static int
evsock_write_handler (struct evsock *sock) {
    int closed = 0;
    ssize_t n;
    int err;
    unsigned long e;

    if (!sock->tx.eof && !sock->tx.buf.n) {
        n = sock->on_write(sock, sock->tx.buf.data, sizeof(sock->tx.buf.data), &closed);
        if (n == -1) {
            if (sock->ssl) {
                SSL_shutdown(sock->ssl);
                SSL_free(sock->ssl);
            }
            close(sock->fd);
            ev_io_stop(sock->loop, &sock->w);
            if (sock->data.destroy) {
                sock->data.destroy(sock->data.ptr);
            }
            return -1;
        }
        if (closed) {
            sock->tx.eof = 1;
        }
        if (!n && !sock->tx.eof) {
            sock->tx.suspend = 1;
            EV_IO_RESET(sock->loop, &sock->w, EVSOCK_NEED_EVENTS(sock));
            return 0;
        }
        sock->tx.buf.n += n;
    }
    if (sock->tx.buf.n) {
        if (sock->ssl) {
            n = SSL_write(sock->ssl, sock->tx.buf.data, sock->tx.buf.n);
            if (n <= 0) {
                err = SSL_get_error(sock->ssl, n);
                switch (err) {
                case SSL_ERROR_WANT_READ:
                    sock->tx.events = EV_READ;
                    EV_IO_RESET(sock->loop, &sock->w, EVSOCK_NEED_EVENTS(sock));
                    return 0;
                case SSL_ERROR_WANT_WRITE:
                    sock->tx.events = EV_WRITE;
                    EV_IO_RESET(sock->loop, &sock->w, EVSOCK_NEED_EVENTS(sock));
                    return 0;
                case SSL_ERROR_SYSCALL:
                    e = ERR_get_error();
                    if (!e) {
                        if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) {
                            return 0;
                        }
                    }
                    /* fall through */
                default:
                    fprintf(stderr, "SSL_write: error\n");
                    SSL_shutdown(sock->ssl);
                    SSL_free(sock->ssl);
                    close(sock->fd);
                    ev_io_stop(sock->loop, &sock->w);
                    if (sock->data.destroy) {
                        sock->data.destroy(sock->data.ptr);
                    }
                    return -1;
                }
            }
            if ((!sock->rx.suspend && !sock->rx.closed) && SSL_pending(sock->ssl)) {
                ev_feed_event(sock->loop, &sock->w, sock->rx.events);
            }
        } else {
            n = send(sock->fd, sock->tx.buf.data, sock->tx.buf.n, 0);
            if (n == -1) {
                if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN) {
                    return 0;
                }
                perror("send");
                close(sock->fd);
                ev_io_stop(sock->loop, &sock->w);
                if (sock->data.destroy) {
                    sock->data.destroy(sock->data.ptr);
                }
                return -1;
            }
        }
        memmove(sock->tx.buf.data, sock->tx.buf.data + n, sock->tx.buf.n - n);
        sock->tx.buf.n -= n;
    }
    if (sock->tx.eof && !sock->tx.buf.n) {
        if (sock->rx.closed) {
            if (sock->ssl) {
                if (!(SSL_get_shutdown(sock->ssl) & SSL_SENT_SHUTDOWN)) {
                    SSL_shutdown(sock->ssl);
                }
                SSL_free(sock->ssl);
            }
            close(sock->fd);
            ev_io_stop(sock->loop, &sock->w);
            if (sock->data.destroy) {
                sock->data.destroy(sock->data.ptr);
            }
            return -1;
        }
        if (sock->ssl) {
            if (!(SSL_get_shutdown(sock->ssl) & SSL_SENT_SHUTDOWN)) {
                SSL_shutdown(sock->ssl);
            }
        } else {
            shutdown(sock->fd, SHUT_WR);
        }
        sock->tx.closed = 1;
    } else {
        sock->tx.events = EV_WRITE;
    }
    EV_IO_RESET(sock->loop, &sock->w, EVSOCK_NEED_EVENTS(sock));
    return 0;
}
Example #23
void uv__io_feed(uv_loop_t* loop, uv__io_t* handle, int event) {
  ev_feed_event(loop->ev, &handle->io_watcher, event);
}
Example #24
/* The buffers to be written must remain valid until the callback is called.
 * This is not required for the uv_buf_t array.
 */
int uv_write(uv_write_t* req, uv_stream_t* handle, uv_buf_t bufs[], int bufcnt,
    uv_write_cb cb) {
  int empty_queue;
  uv_tcp_t* tcp = (uv_tcp_t*)handle;

  /* Initialize the req */
  uv__req_init((uv_req_t*) req);
  req->cb = cb;
  req->handle = handle;
  ngx_queue_init(&req->queue);

  assert(handle->type == UV_TCP &&
      "uv_write (unix) does not yet support other types of streams");

  empty_queue = (tcp->write_queue_size == 0);
  assert(tcp->fd >= 0);

  req->type = UV_WRITE;

  if (bufcnt < UV_REQ_BUFSML_SIZE) {
    req->bufs = req->bufsml;
  }
  else {
    req->bufs = malloc(sizeof(uv_buf_t) * bufcnt);
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(uv_buf_t));
  req->bufcnt = bufcnt;

  /*
   * fprintf(stderr, "cnt: %d bufs: %p bufsml: %p\n", bufcnt, req->bufs, req->bufsml);
   */

  req->write_index = 0;
  tcp->write_queue_size += uv__buf_count(bufs, bufcnt);

  /* Append the request to write_queue. */
  ngx_queue_insert_tail(&tcp->write_queue, &req->queue);

  assert(!ngx_queue_empty(&tcp->write_queue));
  assert(tcp->write_watcher.cb == uv__tcp_io);
  assert(tcp->write_watcher.data == tcp);
  assert(tcp->write_watcher.fd == tcp->fd);

  /* If the queue was empty when this function began, we should attempt to
   * do the write immediately. Otherwise start the write_watcher and wait
   * for the fd to become writable.
   */
  if (empty_queue) {
    if (uv__write(tcp)) {
      /* Error. uv_last_error has been set. */
      return -1;
    }
  }

  /* If the queue is now empty - we've flushed the request already. That
   * means we need to make the callback. The callback can only be done on a
   * fresh stack so we feed the event loop in order to service it.
   */
  if (ngx_queue_empty(&tcp->write_queue)) {
    ev_feed_event(EV_DEFAULT_ &tcp->write_watcher, EV_WRITE);
  } else {
    /* Otherwise there is data to write - so we should wait for the file
     * descriptor to become writable.
     */
    ev_io_start(EV_DEFAULT_ &tcp->write_watcher);
  }

  return 0;
}
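The lifetime rule in the comment at the top of this example is easy to get wrong, so here is a caller-side sketch against the historical libuv 0.x API shown above (send_copy and on_write are hypothetical names, and allocation-failure handling is elided): the payload bytes stay allocated until the write callback fires, while the uv_buf_t array itself may live on the stack.

#include <stdlib.h>
#include <string.h>
#include <uv.h>

static void on_write (uv_write_t* req, int status) {
  free (req->data); /* the payload had to stay valid until now */
  free (req);
}

static int send_copy (uv_stream_t* stream, const char* s) {
  size_t len = strlen (s);
  char* payload = malloc (len);       /* must outlive the request */
  uv_write_t* req = malloc (sizeof *req);
  uv_buf_t buf;                       /* the array may be transient */

  memcpy (payload, s, len);
  req->data = payload;                /* so on_write can free it */
  buf.base = payload;
  buf.len = len;

  return uv_write (req, stream, &buf, 1, on_write);
}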
Example #25
void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  uv_async_t* async;
  uv_stream_t* stream;
  uv_process_t* process;

  handle->close_cb = close_cb;

  switch (handle->type) {
    case UV_NAMED_PIPE:
      uv_pipe_cleanup((uv_pipe_t*)handle);
      /* Fall through. */

    case UV_TTY:
    case UV_TCP:
      stream = (uv_stream_t*)handle;

      uv_read_stop(stream);
      ev_io_stop(stream->loop->ev, &stream->write_watcher);

      uv__close(stream->fd);
      stream->fd = -1;

      if (stream->accepted_fd >= 0) {
        uv__close(stream->accepted_fd);
        stream->accepted_fd = -1;
      }

      assert(!ev_is_active(&stream->read_watcher));
      assert(!ev_is_active(&stream->write_watcher));
      break;

    case UV_UDP:
      uv__udp_start_close((uv_udp_t*)handle);
      break;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*) handle);
      break;

    case UV_CHECK:
      uv_check_stop((uv_check_t*) handle);
      break;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*) handle);
      break;

    case UV_ASYNC:
      async = (uv_async_t*)handle;
      ev_async_stop(async->loop->ev, &async->async_watcher);
      ev_ref(async->loop->ev);
      break;

    case UV_TIMER:
      uv_timer_stop((uv_timer_t*)handle);
      break;

    case UV_PROCESS:
      process = (uv_process_t*)handle;
      ev_child_stop(process->loop->ev, &process->child_watcher);
      break;

    case UV_FS_EVENT:
      uv__fs_event_destroy((uv_fs_event_t*)handle);
      break;

    default:
      assert(0);
  }

  handle->flags |= UV_CLOSING;

  /* This is used to call the on_close callback in the next loop. */
  ev_idle_start(handle->loop->ev, &handle->next_watcher);
  ev_feed_event(handle->loop->ev, &handle->next_watcher, EV_IDLE);
  assert(ev_is_pending(&handle->next_watcher));
}
Example #26
/* On success returns NULL. On error returns a pointer to the write request
 * which had the error.
 */
static uv_write_t* uv__write(uv_tcp_t* tcp) {
  uv_write_t* req;
  struct iovec* iov;
  int iovcnt;
  ssize_t n;

  assert(tcp->fd >= 0);

  /* TODO: should probably while(1) here until EAGAIN */

  /* Get the request at the head of the queue. */
  req = uv_write_queue_head(tcp);
  if (!req) {
    assert(tcp->write_queue_size == 0);
    return NULL;
  }

  assert(req->handle == (uv_stream_t*)tcp);

  /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->bufcnt - req->write_index;

  /* Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */

  if (iovcnt == 1) {
    n = write(tcp->fd, iov[0].iov_base, iov[0].iov_len);
  }
  else {
    n = writev(tcp->fd, iov, iovcnt);
  }

  if (n < 0) {
    if (errno != EAGAIN) {
      /* Error */
      uv_err_new((uv_handle_t*)tcp, errno);
      return req;
    }
  } else {
    /* Successful write */

    /* Update the counters. */
    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if (n < len) {
        buf->base += n;
        buf->len -= n;
        tcp->write_queue_size -= n;
        n = 0;

        /* There is more to write. Break and ensure the watcher is pending. */
        break;

      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert(n >= len);
        n -= len;

        assert(tcp->write_queue_size >= len);
        tcp->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);

          /* Pop the req off tcp->write_queue. */
          ngx_queue_remove(&req->queue);
          if (req->bufs != req->bufsml) {
            free(req->bufs);
          }
          req->bufs = NULL;

          /* Add it to the write_completed_queue where it will have its
           * callback called in the near future.
           * TODO: start trying to write the next request.
           */
          ngx_queue_insert_tail(&tcp->write_completed_queue, &req->queue);
          ev_feed_event(EV_DEFAULT_ &tcp->write_watcher, EV_WRITE);
          return NULL;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* We're not done. */
  ev_io_start(EV_DEFAULT_ &tcp->write_watcher);

  return NULL;
}
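The counter-update loop is the subtle part of uv__write above. The same bookkeeping, extracted into a standalone, hypothetical helper (not libuv API; the write_queue_size accounting is omitted): consume n written bytes by shrinking the current buffer in place on a partial write and advancing write_index past buffers written in full.

#include <assert.h>
#include <stddef.h>

struct buf { char* base; size_t len; };

static void consume_written (struct buf* bufs, int bufcnt,
                             int* write_index, size_t n) {
  while (n > 0) {
    struct buf* b = &bufs[*write_index];

    assert (*write_index < bufcnt);

    if (n < b->len) {
      /* Partial write: shrink this buffer in place, as uv__write does
       * with req->bufs[req->write_index]. */
      b->base += n;
      b->len -= n;
      n = 0;
    } else {
      /* This buffer was written in full: step past it. */
      n -= b->len;
      b->len = 0;
      (*write_index)++;
    }
  }
}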