Example No. 1
static void uv__write_callbacks(uv_tcp_t* tcp) {
  int callbacks_made = 0;
  ngx_queue_t* q;
  uv_write_t* req;

  while (!ngx_queue_empty(&tcp->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&tcp->write_completed_queue);
    assert(q);
    req = ngx_queue_data(q, struct uv_write_s, queue);
    ngx_queue_remove(q);

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb) {
      req->cb(req, 0);
    }

    callbacks_made++;
  }

  assert(ngx_queue_empty(&tcp->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(tcp)) {
    uv__drain(tcp);
  }
}
Example No. 2
static void uv__write_callbacks(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&stream->write_completed_queue);
    req = ngx_queue_data(q, uv_write_t, queue);
    ngx_queue_remove(q);
    uv__req_unregister(stream->loop, req);

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  assert(ngx_queue_empty(&stream->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(stream)) {
    uv__drain(stream);
  }
}
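
Examples 1 and 2 (and Example 6 below) are revisions of the same deferred-callback pattern: the write path never invokes user callbacks directly; finished requests sit on write_completed_queue until this function drains it and reports the results. A minimal sketch of that idea, using a plain linked list instead of libuv's ngx_queue_t (all names here are illustrative, and the list is LIFO for brevity where libuv's queue preserves FIFO order):

/* Minimal sketch of the deferred-callback pattern (illustrative names,
 * not libuv API): writes are finished by queueing the request, and the
 * callbacks run later in one batch, outside the write path. */
#include <stddef.h>
#include <stdio.h>

typedef struct write_req {
  struct write_req* next;
  int error;                        /* 0 on success, errno otherwise */
  void (*cb)(struct write_req*, int);
} write_req;

static write_req* completed_head;   /* head of the "completed" list */

/* Called from the write path: record the result, defer the callback. */
static void finish_write(write_req* req, int error) {
  req->error = error;
  req->next = completed_head;
  completed_head = req;
}

/* Called later (e.g. once per loop iteration): run all pending callbacks. */
static void run_write_callbacks(void) {
  while (completed_head != NULL) {
    write_req* req = completed_head;
    completed_head = req->next;
    if (req->cb)
      req->cb(req, req->error ? -1 : 0);
  }
}

static void on_write(write_req* req, int status) {
  (void) req;
  printf("write finished, status=%d\n", status);
}

int main(void) {
  write_req req = { NULL, 0, on_write };
  finish_write(&req, 0);      /* inside the (hypothetical) write path */
  run_write_callbacks();      /* later, from the event loop */
  return 0;
}

Deferring the callbacks this way keeps user code from re-entering the write machinery while the completed queue is still being walked.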
Example No. 3
static void uv__drain(uv_stream_t* stream) {
  uv_shutdown_t* req;

  assert(!uv_write_queue_head(stream));
  assert(stream->write_queue_size == 0);

  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);

  /* Shutdown? */
  if ((stream->flags & UV_STREAM_SHUTTING) &&
      !(stream->flags & UV_CLOSING) &&
      !(stream->flags & UV_STREAM_SHUT)) {
    assert(stream->shutdown_req);

    req = stream->shutdown_req;
    stream->shutdown_req = NULL;
    uv__req_unregister(stream->loop, req);

    if (shutdown(uv__stream_fd(stream), SHUT_WR)) {
      /* Error. Report it. User should call uv_close(). */
      uv__set_sys_error(stream->loop, errno);
      if (req->cb) {
        req->cb(req, -1);
      }
    } else {
      uv__set_sys_error(stream->loop, 0);
      ((uv_handle_t*) stream)->flags |= UV_STREAM_SHUT;
      if (req->cb) {
        req->cb(req, 0);
      }
    }
  }
}
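
uv__drain runs only once the write queue is empty; the key system call is shutdown(fd, SHUT_WR), which half-closes the connection: the peer sees end-of-stream, but data can still be read from the socket. A standalone sketch of that half-close, assuming fd is an already-connected stream socket (error handling kept minimal):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Half-close an already-connected stream socket: no more data will be
 * sent, but the peer's data can still be read until it closes its end. */
static int half_close(int fd) {
  if (shutdown(fd, SHUT_WR) != 0) {
    fprintf(stderr, "shutdown: %s\n", strerror(errno));
    return -1;
  }

  /* The read side stays usable; drain whatever the peer still sends. */
  char buf[4096];
  ssize_t n;
  while ((n = read(fd, buf, sizeof(buf))) > 0)
    ;                            /* discard, or process as needed */

  return n == 0 ? 0 : -1;        /* 0: peer closed its end cleanly */
}

In the libuv examples the read side is left to the normal read watcher; the sketch drains it inline only to stay self-contained.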
Example No. 4
static void uv__drain(uv_handle_t* handle) {
    assert(!uv_write_queue_head(handle));
    assert(handle->write_queue_size == 0);

    ev_io_stop(EV_DEFAULT_ &handle->write_watcher);

    /* Shutdown? */
    if (uv_flag_is_set(handle, UV_SHUTTING) &&
            !uv_flag_is_set(handle, UV_CLOSING) &&
            !uv_flag_is_set(handle, UV_SHUT)) {
        assert(handle->shutdown_req);

        uv_req_t* req = handle->shutdown_req;
        uv_shutdown_cb cb = req->cb;

        if (shutdown(handle->fd, SHUT_WR)) {
            /* Error. Nothing we can do, close the handle. */
            uv_err_new(handle, errno);
            uv_close(handle);
            if (cb) cb(req, -1);
        } else {
            uv_err_new(handle, 0);
            uv_flag_set(handle, UV_SHUT);
            if (cb) cb(req, 0);
        }
    }
}
Example No. 5
static void uv__drain(uv_tcp_t* tcp) {
  uv_shutdown_t* req;

  assert(!uv_write_queue_head(tcp));
  assert(tcp->write_queue_size == 0);

  ev_io_stop(EV_DEFAULT_ &tcp->write_watcher);

  /* Shutdown? */
  if (uv_flag_is_set((uv_handle_t*)tcp, UV_SHUTTING) &&
      !uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING) &&
      !uv_flag_is_set((uv_handle_t*)tcp, UV_SHUT)) {
    assert(tcp->shutdown_req);

    req = tcp->shutdown_req;

    if (shutdown(tcp->fd, SHUT_WR)) {
      /* Error. Report it. User should call uv_close(). */
      uv_err_new((uv_handle_t*)tcp, errno);
      if (req->cb) {
        req->cb(req, -1);
      }
    } else {
      uv_err_new((uv_handle_t*)tcp, 0);
      uv_flag_set((uv_handle_t*)tcp, UV_SHUT);
      if (req->cb) {
        req->cb(req, 0);
      }
    }
  }
}
Example No. 6
static void uv__write_callbacks(uv_stream_t* stream) {
  int callbacks_made = 0;
  ngx_queue_t* q;
  uv_write_t* req;

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&stream->write_completed_queue);
    assert(q);
    req = ngx_queue_data(q, struct uv_write_s, queue);
    ngx_queue_remove(q);

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb) {
      uv__set_artificial_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }

    callbacks_made++;
  }

  assert(ngx_queue_empty(&stream->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(stream)) {
    uv__drain(stream);
  }
}
Example No. 7
static void uv__drain(uv_stream_t* stream) {
  uv_shutdown_t* req;

  assert(!uv_write_queue_head(stream));
  assert(stream->write_queue_size == 0);

  ev_io_stop(stream->loop->ev, &stream->write_watcher);

  /* Shutdown? */
  if ((stream->flags & UV_SHUTTING) &&
      !(stream->flags & UV_CLOSING) &&
      !(stream->flags & UV_SHUT)) {
    assert(stream->shutdown_req);

    req = stream->shutdown_req;

    if (shutdown(stream->fd, SHUT_WR)) {
      /* Error. Report it. User should call uv_close(). */
      uv_err_new(stream->loop, errno);
      if (req->cb) {
        req->cb(req, -1);
      }
    } else {
      uv_err_new(stream->loop, 0);
      ((uv_handle_t*) stream)->flags |= UV_SHUT;
      if (req->cb) {
        req->cb(req, 0);
      }
    }
  }
}
Example No. 8
/* Write out data from the request at the head of the write queue. On error
 * the errno value is stored on the request and the request is finished; the
 * write callback reports the failure later.
 */
static void uv__write(uv_stream_t* stream) {
  uv_write_t* req;
  struct iovec* iov;
  int iovcnt;
  ssize_t n;

  if (stream->flags & UV_CLOSING) {
    /* Handle was closed this tick. We've received a stale
     * 'is writable' callback from the event loop, ignore.
     */
    return;
  }

start:

  assert(uv__stream_fd(stream) >= 0);

  /* Get the request at the head of the queue. */
  req = uv_write_queue_head(stream);
  if (!req) {
    assert(stream->write_queue_size == 0);
    return;
  }

  assert(req->handle == stream);

  /*
   * Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->bufcnt - req->write_index;

  /*
   * Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */

  if (req->send_handle) {
    struct msghdr msg;
    char scratch[64];
    struct cmsghdr *cmsg;
    int fd_to_send = req->send_handle->io_watcher.fd;

    assert(fd_to_send >= 0);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iovcnt;
    msg.msg_flags = 0;

    msg.msg_control = (void*) scratch;
    msg.msg_controllen = CMSG_LEN(sizeof(fd_to_send));

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = msg.msg_controllen;

    /* silence aliasing warning */
    {
      void* pv = CMSG_DATA(cmsg);
      int* pi = pv;
      *pi = fd_to_send;
    }

    do {
      n = sendmsg(uv__stream_fd(stream), &msg, 0);
    }
    while (n == -1 && errno == EINTR);
  } else {
    do {
      if (iovcnt == 1) {
        n = write(uv__stream_fd(stream), iov[0].iov_base, iov[0].iov_len);
      } else {
        n = writev(uv__stream_fd(stream), iov, iovcnt);
      }
    }
    while (n == -1 && errno == EINTR);
  }

  if (n < 0) {
    if (errno != EAGAIN && errno != EWOULDBLOCK) {
      /* Error */
      req->error = errno;
      stream->write_queue_size -= uv__write_req_size(req);
      uv__write_req_finish(req);
      return;
    } else if (stream->flags & UV_STREAM_BLOCKING) {
      /* If this is a blocking stream, try again. */
      goto start;
    }
  } else {
    /* Successful write */

    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if ((size_t)n < len) {
        buf->base += n;
        buf->len -= n;
        stream->write_queue_size -= n;
        n = 0;

        /* There is more to write. */
        if (stream->flags & UV_STREAM_BLOCKING) {
          /*
           * If we're blocking then we should not be enabling the write
           * watcher - instead we need to try again.
           */
          goto start;
        } else {
          /* Break loop and ensure the watcher is pending. */
          break;
        }

      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert((size_t)n >= len);
        n -= len;

        assert(stream->write_queue_size >= len);
        stream->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);
          uv__write_req_finish(req);
          /* TODO: start trying to write the next request. */
          return;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* Only non-blocking streams should use the write_watcher. */
  assert(!(stream->flags & UV_STREAM_BLOCKING));

  /* We're not done. */
  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
}
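
When req->send_handle is set, the code above piggybacks a file descriptor on the payload by calling sendmsg() with an SCM_RIGHTS control message. The descriptor-passing part in isolation looks roughly like the sketch below; sock is assumed to be an AF_UNIX stream socket, and one ordinary data byte travels with the control message because some platforms reject control-only messages.

#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Send one file descriptor over a Unix domain socket, together with a
 * single data byte. */
static int send_fd(int sock, int fd_to_send) {
  char data = '.';
  struct iovec iov = { &data, 1 };

  /* Control buffer large enough for one int, properly aligned. */
  union {
    char buf[CMSG_SPACE(sizeof(int))];
    struct cmsghdr align;
  } control;
  memset(&control, 0, sizeof(control));

  struct msghdr msg;
  memset(&msg, 0, sizeof(msg));
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = control.buf;
  msg.msg_controllen = sizeof(control.buf);

  struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

  ssize_t n;
  do
    n = sendmsg(sock, &msg, 0);
  while (n == -1 && errno == EINTR);

  return n == -1 ? -1 : 0;
}

Example 11 stores the descriptor with a direct *(int*) CMSG_DATA(cmsg) assignment, Example 8 copies through a void*/int* pair to silence aliasing warnings, and the memcpy above is a third common spelling of the same store.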
Example No. 9
void uv__write(uv_handle_t* handle) {
    assert(handle->fd >= 0);

    /* TODO: should probably while(1) here until EAGAIN */

    /* Get the request at the head of the queue. */
    uv_req_t* req = uv_write_queue_head(handle);
    if (!req) {
        assert(handle->write_queue_size == 0);
        uv__drain(handle);
        return;
    }

    assert(req->handle == handle);

    /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
     * because Windows's WSABUF is not an iovec.
     */
    assert(sizeof(uv_buf_t) == sizeof(struct iovec));
    struct iovec* iov = (struct iovec*) &(req->bufs[req->write_index]);
    int iovcnt = req->bufcnt - req->write_index;

    /* Now do the actual writev. Note that we've been updating the pointers
     * inside the iov each time we write. So there is no need to offset it.
     */

    ssize_t n = writev(handle->fd, iov, iovcnt);

    uv_write_cb cb = req->cb;

    if (n < 0) {
        if (errno != EAGAIN) {
            uv_err_t err = uv_err_new(handle, errno);

            /* XXX How do we handle the error? Need test coverage here. */
            uv_close(handle);

            if (cb) {
                cb(req, -1);
            }
            return;
        }
    } else {
        /* Successful write */

        /* The loop updates the counters. */
        while (n > 0) {
            uv_buf_t* buf = &(req->bufs[req->write_index]);
            size_t len = buf->len;

            assert(req->write_index < req->bufcnt);

            if (n < len) {
                buf->base += n;
                buf->len -= n;
                handle->write_queue_size -= n;
                n = 0;

                /* There is more to write. Break and ensure the watcher is pending. */
                break;

            } else {
                /* Finished writing the buf at index req->write_index. */
                req->write_index++;

                assert(n >= len);
                n -= len;

                assert(handle->write_queue_size >= len);
                handle->write_queue_size -= len;

                if (req->write_index == req->bufcnt) {
                    /* Then we're done! */
                    assert(n == 0);

                    /* Pop the req off handle->write_queue. */
                    ngx_queue_remove(&req->queue);
free(req->bufs); /* FIXME: we should not be allocating for each write */
                    req->bufs = NULL;

                    /* NOTE: call callback AFTER freeing the request data. */
                    if (cb) {
                        cb(req, 0);
                    }

                    if (!ngx_queue_empty(&handle->write_queue)) {
                        assert(handle->write_queue_size > 0);
                    } else {
                        /* Write queue drained. */
                        uv__drain(handle);
                    }

                    return;
                }
            }
        }
    }

    /* Either we've counted n down to zero or we've got EAGAIN. */
    assert(n == 0 || n == -1);

    /* We're not done yet. */
    assert(ev_is_active(&handle->write_watcher));
    ev_io_start(EV_DEFAULT_ &handle->write_watcher);
}
Example No. 10
/* On success returns NULL. On error returns a pointer to the write request
 * which had the error.
 */
static uv_write_t* uv__write(uv_tcp_t* tcp) {
  uv_write_t* req;
  struct iovec* iov;
  int iovcnt;
  ssize_t n;

  assert(tcp->fd >= 0);

  /* TODO: should probably while(1) here until EAGAIN */

  /* Get the request at the head of the queue. */
  req = uv_write_queue_head(tcp);
  if (!req) {
    assert(tcp->write_queue_size == 0);
    return NULL;
  }

  assert(req->handle == (uv_stream_t*)tcp);

  /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->bufcnt - req->write_index;

  /* Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */

  if (iovcnt == 1) {
    n = write(tcp->fd, iov[0].iov_base, iov[0].iov_len);
  }
  else {
    n = writev(tcp->fd, iov, iovcnt);
  }

  if (n < 0) {
    if (errno != EAGAIN) {
      /* Error */
      uv_err_new((uv_handle_t*)tcp, errno);
      return req;
    }
  } else {
    /* Successful write */

    /* Update the counters. */
    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if (n < len) {
        buf->base += n;
        buf->len -= n;
        tcp->write_queue_size -= n;
        n = 0;

        /* There is more to write. Break and ensure the watcher is pending. */
        break;

      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert(n >= len);
        n -= len;

        assert(tcp->write_queue_size >= len);
        tcp->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);

          /* Pop the req off tcp->write_queue. */
          ngx_queue_remove(&req->queue);
          if (req->bufs != req->bufsml) {
            free(req->bufs);
          }
          req->bufs = NULL;

          /* Add it to the write_completed_queue where it will have its
           * callback called in the near future.
           * TODO: start trying to write the next request.
           */
          ngx_queue_insert_tail(&tcp->write_completed_queue, &req->queue);
          ev_feed_event(EV_DEFAULT_ &tcp->write_watcher, EV_WRITE);
          return NULL;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* We're not done. */
  ev_io_start(EV_DEFAULT_ &tcp->write_watcher);

  return NULL;
}
Example No. 11
File: stream.c Project: Darkie/node
/* Write out data from the request at the head of the write queue. On error
 * the errno value is stored on the request and the request is finished; the
 * write callback reports the failure later.
 */
static void uv__write(uv_stream_t* stream) {
  uv_write_t* req;
  struct iovec* iov;
  int iovcnt;
  ssize_t n;

  assert(stream->fd >= 0);

  /* TODO: should probably while(1) here until EAGAIN */

  /* Get the request at the head of the queue. */
  req = uv_write_queue_head(stream);
  if (!req) {
    assert(stream->write_queue_size == 0);
    return;
  }

  assert(req->handle == stream);

  /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->bufcnt - req->write_index;

  /* Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */

  if (req->send_handle) {
    struct msghdr msg;
    char scratch[64];
    struct cmsghdr *cmsg;
    int fd_to_send = req->send_handle->fd;

    assert(fd_to_send >= 0);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iovcnt;
    msg.msg_flags = 0;

    msg.msg_control = (void*) scratch;
    msg.msg_controllen = CMSG_LEN(sizeof(fd_to_send));

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = msg.msg_controllen;
    *(int*) CMSG_DATA(cmsg) = fd_to_send;

    do {
      n = sendmsg(stream->fd, &msg, 0);
    }
    while (n == -1 && errno == EINTR);
  } else {
    do {
      if (iovcnt == 1) {
        n = write(stream->fd, iov[0].iov_base, iov[0].iov_len);
      } else {
        n = writev(stream->fd, iov, iovcnt);
      }
    }
    while (n == -1 && errno == EINTR);
  }

  if (n < 0) {
    if (errno != EAGAIN) {
      /* Error */
      req->error = errno;
      stream->write_queue_size -= uv__write_req_size(req);
      uv__write_req_finish(req);
      return;
    }
  } else {
    /* Successful write */

    /* Update the counters. */
    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if ((size_t)n < len) {
        buf->base += n;
        buf->len -= n;
        stream->write_queue_size -= n;
        n = 0;

        /* There is more to write. Break and ensure the watcher is pending. */
        break;

      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert((size_t)n >= len);
        n -= len;

        assert(stream->write_queue_size >= len);
        stream->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);
          uv__write_req_finish(req);
          /* TODO: start trying to write the next request. */
          return;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* We're not done. */
  ev_io_start(stream->loop->ev, &stream->write_watcher);
}
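
The accounting loop at the bottom of uv__write consumes the bytes reported by write()/writev() from the front of the buffer array, adjusting a partially written buffer in place. Detached from libuv, the same bookkeeping looks roughly like the blocking sketch below (write_all is an illustrative helper, not libuv API); it retries on EINTR and keeps writing until everything is out or a real error occurs. The non-blocking variants above instead stop at EAGAIN and re-arm the write watcher.

#include <errno.h>
#include <stddef.h>
#include <sys/uio.h>
#include <unistd.h>

/* Write an array of iovecs completely, handling short writes and EINTR.
 * Returns 0 on success, -1 on error (errno set by writev). */
static int write_all(int fd, struct iovec* iov, int iovcnt) {
  while (iovcnt > 0) {
    ssize_t n;

    do
      n = writev(fd, iov, iovcnt);
    while (n == -1 && errno == EINTR);

    if (n == -1)
      return -1;                 /* includes EAGAIN on non-blocking fds */

    /* Consume 'n' bytes from the front of the iovec array. */
    while (n > 0) {
      if ((size_t) n < iov[0].iov_len) {
        /* Partial write of the first buffer: advance it in place. */
        iov[0].iov_base = (char*) iov[0].iov_base + n;
        iov[0].iov_len -= n;
        n = 0;
      } else {
        /* First buffer fully written: move on to the next one. */
        n -= iov[0].iov_len;
        iov++;
        iovcnt--;
      }
    }
  }
  return 0;
}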