static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    uv_buf_t bufs[],
                    int bufcnt,
                    struct sockaddr* addr,
                    socklen_t addrlen,
                    uv_udp_send_cb send_cb) {
  assert(bufcnt > 0);

  if (uv__udp_maybe_deferred_bind(handle, addr->sa_family))
    return -1;

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->bufcnt = bufcnt;

  if (bufcnt <= (int) ARRAY_SIZE(req->bufsml)) {
    req->bufs = req->bufsml;
  }
  else if ((req->bufs = malloc(bufcnt * sizeof(bufs[0]))) == NULL) {
    uv__set_sys_error(handle->loop, ENOMEM);
    return -1;
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(bufs[0]));
  ngx_queue_insert_tail(&handle->write_queue, &req->queue);
  uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
  uv__handle_start(handle);

  return 0;
}
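/* The function above defers the actual bind(2) until first use via
 * uv__udp_maybe_deferred_bind(). A minimal standalone sketch of that idea
 * follows. It is illustrative only: the helper name is hypothetical, it
 * handles the IPv4 wildcard case only, and it is not libuv's
 * implementation. */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int maybe_deferred_bind(int* fd) {
  struct sockaddr_in addr;

  if (*fd != -1)
    return 0;  /* socket already exists; assume it is bound */

  *fd = socket(AF_INET, SOCK_DGRAM, 0);
  if (*fd == -1)
    return -1;

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_ANY);
  addr.sin_port = 0;  /* kernel picks an ephemeral port */

  if (bind(*fd, (struct sockaddr*) &addr, sizeof(addr)) == -1) {
    close(*fd);
    *fd = -1;
    return -1;
  }

  return 0;
}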
static int uv__udp_send(uv_udp_send_t* req,
                        uv_udp_t* handle,
                        uv_buf_t bufs[],
                        int bufcnt,
                        struct sockaddr* addr,
                        socklen_t addrlen,
                        uv_udp_send_cb send_cb) {
  if (uv__udp_maybe_deferred_bind(handle, addr->sa_family))
    return -1;

  uv__req_init(handle->loop, (uv_req_t*)req);

  memcpy(&req->addr, addr, addrlen);
  req->addrlen = addrlen;
  req->send_cb = send_cb;
  req->handle = handle;
  req->bufcnt = bufcnt;
  req->type = UV_UDP_SEND;

  if (bufcnt <= UV_REQ_BUFSML_SIZE) {
    req->bufs = req->bufsml;
  }
  else if ((req->bufs = malloc(bufcnt * sizeof(bufs[0]))) == NULL) {
    uv__set_sys_error(handle->loop, ENOMEM);
    return -1;
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(bufs[0]));
  ngx_queue_insert_tail(&handle->write_queue, &req->queue);
  uv__udp_start_write_watcher(handle);

  return 0;
}
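/* Both revisions above use the same small-buffer optimization: a short
 * inline array embedded in the request avoids a malloc() in the common case
 * of only a few buffers per send. A generic, hypothetical sketch of the
 * pattern follows; the type names and the capacity of 4 are assumptions for
 * illustration, not libuv API. */
#include <stdlib.h>
#include <string.h>

#define INLINE_BUFS 4

typedef struct { char* base; size_t len; } buf_t;

typedef struct {
  buf_t* bufs;                /* points at inline storage or a heap block */
  buf_t bufsml[INLINE_BUFS];  /* inline storage for small sends */
  unsigned int nbufs;
} send_req_t;

/* Copy the caller's buffer descriptors into the request, allocating only
 * when the count exceeds the inline capacity. */
static int store_bufs(send_req_t* req, const buf_t* bufs, unsigned int nbufs) {
  req->bufs = req->bufsml;
  if (nbufs > INLINE_BUFS) {
    req->bufs = malloc(nbufs * sizeof(bufs[0]));
    if (req->bufs == NULL)
      return -1;
  }
  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  req->nbufs = nbufs;
  return 0;
}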
int uv_udp_recv_start(uv_udp_t* handle,
                      uv_alloc_cb alloc_cb,
                      uv_udp_recv_cb recv_cb) {
  if (alloc_cb == NULL || recv_cb == NULL) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
    return -1;
  }

  if (uv__io_active(&handle->read_watcher)) {
    uv__set_artificial_error(handle->loop, UV_EALREADY);
    return -1;
  }

  if (uv__udp_maybe_deferred_bind(handle, AF_INET))
    return -1;

  handle->alloc_cb = alloc_cb;
  handle->recv_cb = recv_cb;

  uv__udp_start_watcher(handle,
                        &handle->read_watcher,
                        uv__udp_recvmsg,
                        UV__IO_READ);

  return 0;
}
int uv_udp_set_membership(uv_udp_t* handle,
                          const char* multicast_addr,
                          const char* interface_addr,
                          uv_membership membership) {
  int err;
  struct sockaddr_in addr4;
  struct sockaddr_in6 addr6;

  /* Parse the group address to decide between the IPv4 and IPv6 paths;
   * either way the handle is first bound (with UV_UDP_REUSEADDR) if it is
   * not bound already. */
  if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
    err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
    if (err)
      return err;
    return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
  } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
    err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
    if (err)
      return err;
    return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
  } else {
    return -EINVAL;
  }
}
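/* Hedged usage sketch for the function above, assuming the libuv 1.x public
 * API: join an IPv4 multicast group on the default interface. The group
 * address "239.255.0.1" is an arbitrary example value. */
#include <stdio.h>
#include <uv.h>

static void join_group_example(uv_loop_t* loop, uv_udp_t* handle) {
  int err;

  uv_udp_init(loop, handle);
  err = uv_udp_set_membership(handle, "239.255.0.1", NULL, UV_JOIN_GROUP);
  if (err != 0)
    fprintf(stderr, "join failed: %s\n", uv_strerror(err));
}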
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  int err;
  int empty_queue;

  assert(nbufs > 0);

  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
  if (err)
    return err;

  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up send_queue_size/count later.
   */
  empty_queue = (handle->send_queue_count == 0);

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->nbufs = nbufs;

  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL) {
    /* The request was registered by uv__req_init() above; unregister it
     * again so the loop's active request count stays balanced. */
    uv__req_unregister(handle->loop, req);
    return -ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
  handle->send_queue_count++;
  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
  uv__handle_start(handle);

  if (empty_queue && !(handle->flags & UV_UDP_PROCESSING)) {
    /* Queue was empty and no sendmsg pass is already running: try to write
     * immediately instead of waiting for the watcher to fire. */
    uv__udp_sendmsg(handle);
  } else {
    uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
  }

  return 0;
}
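/* Hedged usage sketch for the queued-send path, assuming the libuv 1.x
 * public wrapper uv_udp_send() which forwards to uv__udp_send() above. The
 * destination address and port are arbitrary example values. */
#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static void on_send(uv_udp_send_t* req, int status) {
  if (status != 0)
    fprintf(stderr, "send error: %s\n", uv_strerror(status));
  free(req);
}

static void send_example(uv_udp_t* handle) {
  struct sockaddr_in dest;
  uv_udp_send_t* req;
  uv_buf_t buf;

  uv_ip4_addr("127.0.0.1", 9999, &dest);
  buf = uv_buf_init("hello", 5);  /* static payload; libuv does not copy it */
  req = malloc(sizeof(*req));
  uv_udp_send(req, handle, &buf, 1, (const struct sockaddr*) &dest, on_send);
}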
int uv__udp_try_send(uv_udp_t* handle,
                     const uv_buf_t bufs[],
                     unsigned int nbufs,
                     const struct sockaddr* addr,
                     unsigned int addrlen) {
  int err;
  struct msghdr h;
  ssize_t size;

  assert(nbufs > 0);

  /* already sending a message */
  if (handle->send_queue_count != 0)
    return -EAGAIN;

  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
  if (err)
    return err;

  memset(&h, 0, sizeof h);
  h.msg_name = (struct sockaddr*) addr;
  h.msg_namelen = addrlen;
  h.msg_iov = (struct iovec*) bufs;
  h.msg_iovlen = nbufs;

  /* Retry if interrupted by a signal. */
  do {
    size = sendmsg(handle->io_watcher.fd, &h, 0);
  } while (size == -1 && errno == EINTR);

  if (size == -1) {
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return -EAGAIN;
    else
      return -errno;
  }

  return size;
}
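/* Hedged usage sketch, assuming the libuv 1.x public uv_udp_try_send(): a
 * non-negative return is the number of bytes sent; UV_EAGAIN means the
 * socket was not immediately writable (or a queued send is already pending)
 * and the caller should fall back to uv_udp_send(). */
#include <stdio.h>
#include <uv.h>

static void try_send_example(uv_udp_t* handle, const struct sockaddr* dest) {
  uv_buf_t buf;
  int r;

  buf = uv_buf_init("ping", 4);
  r = uv_udp_try_send(handle, &buf, 1, dest);
  if (r == UV_EAGAIN) {
    /* not writable right now: queue the send with uv_udp_send() instead */
  } else if (r < 0) {
    fprintf(stderr, "try_send: %s\n", uv_strerror(r));
  }
}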
static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    uv_buf_t bufs[],
                    int bufcnt,
                    struct sockaddr* addr,
                    socklen_t addrlen,
                    uv_udp_send_cb send_cb) {
  int err;

  assert(bufcnt > 0);

  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family);
  if (err)
    return err;

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->bufcnt = bufcnt;

  if (bufcnt <= (int) ARRAY_SIZE(req->bufsml))
    req->bufs = req->bufsml;
  else
    req->bufs = malloc(bufcnt * sizeof(*bufs));

  if (req->bufs == NULL) {
    uv__req_unregister(handle->loop, req);  /* balance uv__req_init() above */
    return -ENOMEM;
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(bufs[0]));
  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
  uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
  uv__handle_start(handle);

  return 0;
}
int uv__udp_recv_start(uv_udp_t* handle,
                       uv_alloc_cb alloc_cb,
                       uv_udp_recv_cb recv_cb) {
  int err;

  if (alloc_cb == NULL || recv_cb == NULL)
    return -EINVAL;

  if (uv__io_active(&handle->io_watcher, UV__POLLIN))
    return -EALREADY;  /* FIXME(bnoordhuis) Should be -EBUSY. */

  err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
  if (err)
    return err;

  handle->alloc_cb = alloc_cb;
  handle->recv_cb = recv_cb;

  uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN);
  uv__handle_start(handle);

  return 0;
}
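/* Hedged usage sketch for the receive path, assuming the libuv 1.x callback
 * signatures: the alloc callback supplies a buffer for each datagram, and
 * the recv callback consumes (and frees) it. */
#include <stdlib.h>
#include <uv.h>

static void on_alloc(uv_handle_t* handle, size_t suggested_size,
                     uv_buf_t* buf) {
  buf->base = malloc(suggested_size);
  buf->len = suggested_size;
}

static void on_recv(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf,
                    const struct sockaddr* addr, unsigned flags) {
  if (nread > 0) {
    /* process nread bytes at buf->base; addr identifies the sender */
  }
  /* nread == 0 with addr == NULL is only a wakeup; nread < 0 is an error */
  free(buf->base);
}

/* then, after uv_udp_init(): uv_udp_recv_start(handle, on_alloc, on_recv); */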