/* handle send events on a nbt dgram socket */ static void dgm_socket_send(struct nbt_dgram_socket *dgmsock) { struct nbt_dgram_request *req; NTSTATUS status; while ((req = dgmsock->send_queue)) { size_t len; len = req->encoded.length; status = socket_sendto(dgmsock->sock, &req->encoded, &len, req->dest); if (NT_STATUS_IS_ERR(status)) { DEBUG(3,("Failed to send datagram of length %u to %s:%d: %s\n", (unsigned)req->encoded.length, req->dest->addr, req->dest->port, nt_errstr(status))); DLIST_REMOVE(dgmsock->send_queue, req); talloc_free(req); continue; } if (!NT_STATUS_IS_OK(status)) return; DLIST_REMOVE(dgmsock->send_queue, req); talloc_free(req); } TEVENT_FD_NOT_WRITEABLE(dgmsock->fde); return; }
/*
  write event handler for a smb_krb5 socket: push the pending request
  out and switch the fd over to waiting for the reply
*/
static void smb_krb5_socket_send(struct smb_krb5_socket *smb_krb5)
{
	size_t sent = smb_krb5->request.length;
	NTSTATUS status = socket_send(smb_krb5->sock, &smb_krb5->request,
				      &sent);

	if (!NT_STATUS_IS_OK(status)) {
		/* not sent yet - keep waiting for writeability */
		return;
	}

	/* request is on the wire - from now on only the reply matters */
	TEVENT_FD_READABLE(smb_krb5->fde);
	TEVENT_FD_NOT_WRITEABLE(smb_krb5->fde);
}
/* called when an incoming connection is writeable */
static void queue_io_write(struct ctdb_queue *queue)
{
	while (queue->out_queue) {
		struct ctdb_queue_pkt *pkt = queue->out_queue;
		ssize_t n;
		if (queue->ctdb->flags & CTDB_FLAG_TORTURE) {
			/* torture mode: write one byte at a time to
			   exercise the partial-write path below */
			n = write(queue->fd, pkt->data, 1);
		} else {
			n = write(queue->fd, pkt->data, pkt->length);
		}

		if (n == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
			/* hard write error - tear the connection down */
			if (pkt->length != pkt->full_length) {
				/* partial packet sent - we have to drop it */
				DLIST_REMOVE(queue->out_queue, pkt);
				queue->out_queue_length--;
				talloc_free(pkt);
			}
			/* NOTE(review): an untouched packet is left on
			   the queue here - presumably for the dead-queue
			   handling to see; confirm against queue_dead() */
			talloc_free(queue->fde);
			queue->fde = NULL;
			queue->fd = -1;
			/* defer the teardown callback so it runs outside
			   this handler */
			tevent_schedule_immediate(queue->im, queue->ctdb->ev,
						  queue_dead, queue);
			return;
		}
		/* n == 0, or -1 with EAGAIN/EWOULDBLOCK: try again on
		   the next write event */
		if (n <= 0) return;
		
		if (n != pkt->length) {
			/* partial write: advance past the bytes that
			   made it out and wait for writeability again */
			pkt->length -= n;
			pkt->data += n;
			return;
		}

		/* whole packet sent - retire it */
		DLIST_REMOVE(queue->out_queue, pkt);
		queue->out_queue_length--;
		talloc_free(pkt);
	}

	/* queue drained - stop watching for write events */
	TEVENT_FD_NOT_WRITEABLE(queue->fde);
}
static void client_send(struct cli_ctx *cctx) { int ret; ret = sss_packet_send(cctx->creq->out, cctx->cfd); if (ret == EAGAIN) { /* not all data was sent, loop again */ return; } if (ret != EOK) { DEBUG(0, ("Failed to send data, aborting client!\n")); talloc_free(cctx); return; } /* ok all sent */ TEVENT_FD_NOT_WRITEABLE(cctx->cfde); TEVENT_FD_READABLE(cctx->cfde); talloc_free(cctx->creq); cctx->creq = NULL; return; }
/* trigger a run of the send queue */
_PUBLIC_ void packet_queue_run(struct packet_context *pc)
{
	while (pc->send_queue) {
		struct send_element *el = pc->send_queue;
		NTSTATUS status;
		size_t nwritten;
		/* resume from wherever the previous partial send stopped */
		DATA_BLOB blob = data_blob_const(el->blob.data + el->nsent,
						 el->blob.length - el->nsent);

		status = socket_send(pc->sock, &blob, &nwritten);

		if (NT_STATUS_IS_ERR(status)) {
			/* hard failure - report it and stop */
			packet_error(pc, status);
			return;
		}
		if (!NT_STATUS_IS_OK(status)) {
			/* transient condition - retry on the next
			   write event */
			return;
		}

		el->nsent += nwritten;

		if (el->nsent == el->blob.length) {
			/* element fully sent: unlink it before invoking
			   the callback, which may re-enter this code */
			DLIST_REMOVE(pc->send_queue, el);
			if (el->send_callback) {
				/* pc->busy defers destruction of pc while
				   the callback runs; if the destructor
				   fired meanwhile, finish the free here
				   and do not touch pc (or el) again */
				pc->busy = true;
				el->send_callback(el->send_callback_private);
				pc->busy = false;
				if (pc->destructor_called) {
					talloc_free(pc);
					return;
				}
			}
			talloc_free(el);
		}
	}

	/* we're out of requests to send, so don't wait for write
	   events any more */
	TEVENT_FD_NOT_WRITEABLE(pc->fde);
}
/* event loop handling using poll() */
static int poll_event_loop_poll(struct tevent_context *ev,
				struct timeval *tvalp)
{
	struct poll_event_context *poll_ev = talloc_get_type_abort(
		ev->additional_data, struct poll_event_context);
	int pollrtn;
	int timeout = -1;
	int poll_errno;
	struct tevent_fd *fde = NULL;
	unsigned i;

	/* pending signals take precedence over fd and timer events */
	if (ev->signal_events && tevent_common_check_signal(ev)) {
		return 0;
	}

	if (tvalp != NULL) {
		/* convert timeval to milliseconds, rounding up so we
		   never wake before the timer is actually due */
		timeout = tvalp->tv_sec * 1000;
		timeout += (tvalp->tv_usec + 999) / 1000;
	}

	poll_event_drain_signal_fd(poll_ev);

	if (!poll_event_setup_fresh(ev, poll_ev)) {
		return -1;
	}

	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_BEFORE_WAIT);
	pollrtn = poll(poll_ev->fds, poll_ev->num_fds, timeout);
	/* save errno before the trace callback can clobber it */
	poll_errno = errno;
	tevent_trace_point_callback(poll_ev->ev, TEVENT_TRACE_AFTER_WAIT);

	if (pollrtn == -1 && poll_errno == EINTR && ev->signal_events) {
		/* interrupted by a signal - dispatch signal handlers */
		tevent_common_check_signal(ev);
		return 0;
	}

	if (pollrtn == 0 && tvalp) {
		/* we don't care about a possible delay here */
		tevent_common_loop_timer_delay(ev);
		return 0;
	}

	if (pollrtn <= 0) {
		/*
		 * No fd's ready
		 */
		return 0;
	}

	/* at least one file descriptor is ready - check which ones
	   and call the handler, being careful to allow the handler
	   to remove itself when called */
	for (fde = ev->fd_events; fde; fde = fde->next) {
		/* additional_flags holds this fde's index into the
		   pollfd array */
		unsigned idx = fde->additional_flags;
		struct pollfd *pfd;
		uint16_t flags = 0;

		if (idx == UINT64_MAX) {
			/* fde has no pollfd slot */
			continue;
		}

		pfd = &poll_ev->fds[idx];

		if (pfd->revents & POLLNVAL) {
			/*
			 * the socket is dead! this should never
			 * happen as the socket should have first been
			 * made readable and that should have removed
			 * the event, so this must be a bug.
			 *
			 * We ignore it here to match the epoll
			 * behavior.
			 */
			tevent_debug(ev, TEVENT_DEBUG_ERROR,
				     "POLLNVAL on fde[%p] fd[%d] - disabling\n",
				     fde, pfd->fd);
			poll_ev->fdes[idx] = NULL;
			poll_ev->deleted = true;
			DLIST_REMOVE(ev->fd_events, fde);
			fde->event_ctx = NULL;
			continue;
		}

		if (pfd->revents & (POLLHUP|POLLERR)) {
			/* If we only wait for TEVENT_FD_WRITE, we
			   should not tell the event handler about it,
			   and remove the writable flag, as we only
			   report errors when waiting for read events
			   to match the select behavior. */
			if (!(fde->flags & TEVENT_FD_READ)) {
				TEVENT_FD_NOT_WRITEABLE(fde);
				continue;
			}
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLIN) {
			flags |= TEVENT_FD_READ;
		}
		if (pfd->revents & POLLOUT) {
			flags |= TEVENT_FD_WRITE;
		}
		/*
		 * Note that fde->flags could be changed when using
		 * the poll_mt backend together with threads,
		 * that why we need to check pfd->revents and fde->flags
		 */
		flags &= fde->flags;
		if (flags != 0) {
			/* demote the fde so other fds get a fair turn,
			   then dispatch exactly one handler per call */
			DLIST_DEMOTE(ev->fd_events, fde, struct tevent_fd);
			fde->handler(ev, fde, flags, fde->private_data);
			return 0;
		}
	}
/*
  fd event handler for one side of the fd2 test socket pair: each side
  writes an incrementing byte stream to its peer and reads the peer's
  stream back, verifying the expected flags and byte values at every
  step
*/
static void test_event_fd2_sock_handler(struct tevent_context *ev_ctx,
					struct tevent_fd *fde,
					uint16_t flags,
					void *private_data)
{
	struct test_event_fd2_sock *cur_sock =
		(struct test_event_fd2_sock *)private_data;
	struct test_event_fd2_state *state = cur_sock->state;
	struct test_event_fd2_sock *oth_sock = NULL;
	uint8_t v = 0, c;
	ssize_t ret;

	/* identify the peer end of the socket pair */
	if (cur_sock == &state->sock0) {
		oth_sock = &state->sock1;
	} else {
		oth_sock = &state->sock0;
	}

	if (oth_sock->num_written == 1) {
		/* after the peer's very first write we expect to be
		   both readable and writable */
		if (flags != (TEVENT_FD_READ | TEVENT_FD_WRITE)) {
			state->finished = true;
			state->error = __location__;
			return;
		}
	}

	if (cur_sock->num_read == oth_sock->num_written) {
		/* nothing outstanding to read - this event is bogus */
		state->finished = true;
		state->error = __location__;
		return;
	}

	if (!(flags & TEVENT_FD_READ)) {
		state->finished = true;
		state->error = __location__;
		return;
	}

	if (oth_sock->num_read >= PIPE_BUF) {
		/*
		 * On Linux we become writable once we've read
		 * one byte. On Solaris we only become writable
		 * again once we've read 4096 bytes. PIPE_BUF
		 * is probably a safe bet to test against.
		 *
		 * There should be room to write a byte again
		 */
		if (!(flags & TEVENT_FD_WRITE)) {
			state->finished = true;
			state->error = __location__;
			return;
		}
	}

	if ((flags & TEVENT_FD_WRITE) && !cur_sock->got_full) {
		/* still filling the pipe: write the next byte of the
		   incrementing sequence */
		v = (uint8_t)cur_sock->num_written;
		ret = write(cur_sock->fd, &v, 1);
		if (ret != 1) {
			state->finished = true;
			state->error = __location__;
			return;
		}
		cur_sock->num_written++;
		/* sanity guard against the test spinning forever */
		if (cur_sock->num_written > 0x80000000) {
			state->finished = true;
			state->error = __location__;
			return;
		}
		return;
	}

	if (!cur_sock->got_full) {
		/* no longer writable: this side's pipe is now full */
		cur_sock->got_full = true;

		if (!oth_sock->got_full) {
			/*
			 * cur_sock is full,
			 * lets wait for oth_sock
			 * to be filled
			 */
			tevent_fd_set_flags(cur_sock->fde, 0);
			return;
		}

		/*
		 * oth_sock waited for cur_sock,
		 * lets restart it
		 */
		tevent_fd_set_flags(oth_sock->fde,
				    TEVENT_FD_READ|TEVENT_FD_WRITE);
	}

	/* drain phase: read one byte and verify it matches the
	   expected position in the sequence */
	ret = read(cur_sock->fd, &v, 1);
	if (ret != 1) {
		state->finished = true;
		state->error = __location__;
		return;
	}
	c = (uint8_t)cur_sock->num_read;
	if (c != v) {
		state->finished = true;
		state->error = __location__;
		return;
	}
	cur_sock->num_read++;
	if (cur_sock->num_read < oth_sock->num_written) {
		/* there is more to read */
		return;
	}
	/*
	 * we read everything, we need to remove TEVENT_FD_WRITE
	 * to avoid spinning
	 */
	TEVENT_FD_NOT_WRITEABLE(cur_sock->fde);

	if (oth_sock->num_read == cur_sock->num_written) {
		/*
		 * both directions are finished
		 */
		state->finished = true;
	}
	return;
}
static void test_event_fd1_fde_handler(struct tevent_context *ev_ctx, struct tevent_fd *fde, uint16_t flags, void *private_data) { struct test_event_fd1_state *state = (struct test_event_fd1_state *)private_data; if (state->drain_done) { state->finished = true; state->error = __location__; return; } if (state->drain) { ssize_t ret; uint8_t c = 0; if (!(flags & TEVENT_FD_READ)) { state->finished = true; state->error = __location__; return; } ret = read(state->sock[0], &c, 1); if (ret == 1) { return; } /* * end of test... */ tevent_fd_set_flags(fde, 0); state->drain_done = true; return; } if (!state->got_write) { uint8_t c = 0; if (flags != TEVENT_FD_WRITE) { state->finished = true; state->error = __location__; return; } state->got_write = true; /* * we write to the other socket... */ write(state->sock[1], &c, 1); TEVENT_FD_NOT_WRITEABLE(fde); TEVENT_FD_READABLE(fde); return; } if (!state->got_read) { if (flags != TEVENT_FD_READ) { state->finished = true; state->error = __location__; return; } state->got_read = true; TEVENT_FD_NOT_READABLE(fde); return; } state->finished = true; state->error = __location__; return; }