/* Event-loop callback: an LDAP bind result is readable on the bind socket.
 * Pops the matching queue item, checks the bind outcome, and (on success)
 * builds an Access-Accept RADIUS response; in every case the item is handed
 * to the stdio writer queue and the writer event is armed. */
static void on_bind_readable(verto_ctx *vctx, verto_ev *ev)
{
    const char *errstr = "error";
    LDAPMessage *results;
    struct otpd_queue_item *item = NULL;
    int i, rslt;
    (void)vctx; /* unused; the LDAP handle comes from the event's private data */

    rslt = ldap_result(verto_get_private(ev), LDAP_RES_ANY, 0, NULL, &results);
    if (rslt != LDAP_RES_BIND) {
        /* Anything but a bind result is fatal for this daemon: free the
         * message (NULL-safe once cleared on error) and stop the loop. */
        if (rslt <= 0)
            results = NULL; /* ldap_result failed; nothing was allocated */
        ldap_msgfree(results);
        otpd_log_err(EIO, "IO error received on bind socket");
        verto_break(ctx.vctx);
        ctx.exitstatus = 1;
        return;
    }

    /* Match the reply to the request we queued by LDAP message id. */
    item = otpd_queue_pop_msgid(&ctx.bind.responses, ldap_msgid(results));
    if (item == NULL) {
        /* No waiting request for this msgid; drop the stray reply. */
        ldap_msgfree(results);
        return;
    }
    item->msgid = -1; /* reply consumed; msgid no longer outstanding */

    /* First check the parse itself, then the server-reported bind code
     * (returned via i). */
    rslt = ldap_parse_result(verto_get_private(ev), results, &i,
                             NULL, NULL, NULL, NULL, 0);
    if (rslt != LDAP_SUCCESS) {
        errstr = ldap_err2string(rslt);
        goto error;
    }
    rslt = i;
    if (rslt != LDAP_SUCCESS) {
        errstr = ldap_err2string(rslt);
        goto error;
    }

    /* Bind succeeded: build the Access-Accept packet for the client. */
    item->sent = 0;
    i = krad_packet_new_response(ctx.kctx, SECRET,
                                 krad_code_name2num("Access-Accept"),
                                 NULL, item->req, &item->rsp);
    if (i != 0) {
        /* NOTE(review): krb5_get_error_message() returns an allocated
         * string that is never krb5_free_error_message()d here — looks
         * like a small leak on this rare path; confirm upstream. */
        errstr = krb5_get_error_message(ctx.kctx, i);
        goto error;
    }

error:
    /* Deliberate fall-through: success also lands here. Success vs failure
     * is distinguished by item->rsp being non-NULL. */
    if (item != NULL)
        otpd_log_req(item->req, "bind end: %s",
                     item->rsp != NULL ? "success" : errstr);
    ldap_msgfree(results);
    /* Queue the item (with or without a response) for the stdio writer and
     * enable write events so it gets flushed. */
    otpd_queue_push(&ctx.stdio.responses, item);
    verto_set_flags(ctx.stdio.writer, VERTO_EV_FLAG_PERSIST |
                                      VERTO_EV_FLAG_IO_ERROR |
                                      VERTO_EV_FLAG_IO_READ |
                                      VERTO_EV_FLAG_IO_WRITE);
}
/* Event-loop callback: the bind socket is writable. Pops one pending
 * request and starts an asynchronous simple bind as the user's DN with the
 * RADIUS User-Password attribute as the credential; the OTP-required
 * control is attached to every bind. */
static void on_bind_writable(verto_ctx *vctx, verto_ev *ev)
{
    LDAPControl control = { OTP_REQUIRED_OID, {}, true };
    LDAPControl *ctrls[] = { &control, NULL };
    /* Default destination is the stdio error/response queue; it is switched
     * to the bind-responses queue only once the bind is in flight. */
    struct otpd_queue *push = &ctx.stdio.responses;
    const krb5_data *data;
    struct berval cred;
    struct otpd_queue_item *item;
    int i;
    (void)vctx; /* unused; the LDAP handle comes from the event's private data */

    item = otpd_queue_pop(&ctx.bind.requests);
    if (item == NULL) {
        /* Nothing left to send: stop watching for writability. */
        verto_set_flags(ctx.bind.io, VERTO_EV_FLAG_PERSIST |
                                     VERTO_EV_FLAG_IO_ERROR |
                                     VERTO_EV_FLAG_IO_READ);
        return;
    }

    /* Without a resolved DN we cannot bind; fail the item immediately. */
    if (item->user.dn == NULL)
        goto error;

    data = krad_packet_get_attr(item->req,
                                krad_attr_name2num("User-Password"), 0);
    if (data == NULL)
        goto error;

    /* The RADIUS password becomes the simple-bind credential. */
    cred.bv_val = data->data;
    cred.bv_len = data->length;
    i = ldap_sasl_bind(verto_get_private(ev), item->user.dn,
                       LDAP_SASL_SIMPLE, &cred, ctrls, NULL, &item->msgid);
    if (i != LDAP_SUCCESS) {
        /* Failure to even initiate a bind is treated as fatal for the
         * daemon; note that execution still falls through below and the
         * item is pushed before the loop unwinds. */
        otpd_log_err(errno, "Unable to initiate bind: %s",
                     ldap_err2string(i));
        verto_break(ctx.vctx);
        ctx.exitstatus = 1;
    }

    otpd_log_req(item->req, "bind start: %s", item->user.dn);
    /* Bind is outstanding: park the item where on_bind_readable() will
     * find it by msgid. */
    push = &ctx.bind.responses;

error:
    /* Push to whichever queue was selected above (stdio on early failure,
     * bind-responses once the bind started). */
    otpd_queue_push(push, item);
}
/* Dispatcher callback: a worker signalled (via the signal pipe) that a
 * query reply is ready. Drains one byte from the pipe, pops one finished
 * query from the locked reply list and sends or discards it, then assigns
 * one waiting query (if any) to the freed-up worker.
 *
 * Fix: the original had the `if (w->wait_list)` check duplicated — an
 * identical test nested directly inside itself, a leftover from when a
 * lock was taken between the two. The redundant inner check is removed;
 * behavior is unchanged. */
static void gp_handle_reply(verto_ctx *vctx, verto_ev *ev)
{
    struct gp_workers *w;
    struct gp_query *q = NULL;
    char dummy;
    int ret;

    w = verto_get_private(ev);

    /* first read out the dummy so the pipe doesn't get clogged */
    ret = read(w->sig_pipe[0], &dummy, 1);
    if (ret) {
        /* ignore errors; the byte is only a wakeup token */
    }

    /* grab a query reply if any; the unlocked pre-check is a cheap
     * fast-path, the list is re-checked under the lock before popping */
    if (w->reply_list) {
        /* ======> POOL LOCK */
        pthread_mutex_lock(&w->lock);
        if (w->reply_list != NULL) {
            q = w->reply_list;
            w->reply_list = q->next;
        }
        /* <====== POOL LOCK */
        pthread_mutex_unlock(&w->lock);
    }

    if (q) {
        switch (q->status) {
        case GP_QUERY_IN:
            /* ?! fallback and kill client conn */
            /* fallthrough */
        case GP_QUERY_ERR:
            gp_conn_free(q->conn);
            gp_query_free(q, true);
            break;
        case GP_QUERY_OUT:
            gp_socket_send_data(vctx, q->conn, q->buffer, q->buflen);
            gp_query_free(q, false);
            break;
        }
    }

    /* while we are at it, check if there is anything in the wait list
     * we need to process, as one thread just got free :-)
     * only the dispatcher handles wait_list, so no lock is needed */
    q = NULL;
    if (w->wait_list) {
        q = w->wait_list;
        w->wait_list = q->next;
        q->next = NULL;
    }
    if (q) {
        gp_query_assign(w, q);
    }
}
/* Event-loop callback: the listening unix socket is readable — accept one
 * client connection, make the fd non-blocking and close-on-exec, fetch the
 * peer's credentials, and hand the connection to the reader.
 *
 * Fix: on accept() failing with EINTR the original returned without
 * releasing the freshly calloc'd conn, leaking it on every interrupted
 * accept. The connection context is now freed before returning. */
void accept_sock_conn(verto_ctx *vctx, verto_ev *ev)
{
    struct gp_conn *conn = NULL;
    int listen_fd;
    int fd = -1;
    int ret;

    conn = calloc(1, sizeof(struct gp_conn));
    if (!conn) {
        ret = ENOMEM;
        goto done;
    }
    conn->sock_ctx = verto_get_private(ev);
    conn->us.sd = -1;

    listen_fd = verto_get_fd(ev);
    fd = accept(listen_fd,
                (struct sockaddr *)&conn->us.sock_addr,
                &conn->us.sock_addr_len);
    if (fd == -1) {
        ret = errno;
        if (ret == EINTR) {
            /* let the event loop retry later; release the unused
             * connection context (was leaked before this fix) */
            gp_conn_free(conn);
            return;
        }
        goto done;
    }
    conn->us.sd = fd;

    /* never block the event loop on this client */
    ret = set_status_flags(fd, O_NONBLOCK);
    if (ret) {
        GPDEBUG("Failed to set O_NONBLOCK on %d!\n", fd);
        goto done;
    }

    /* don't leak the client fd into spawned children */
    ret = set_fd_flags(fd, FD_CLOEXEC);
    if (ret) {
        GPDEBUG("Failed to set FD_CLOEXEC on %d!\n", fd);
        goto done;
    }

    ret = get_peercred(fd, conn);
    if (ret) {
        goto done;
    }

    GPDEBUG("Client connected (fd = %d)", fd);
    if (conn->creds.type & CRED_TYPE_UNIX) {
        GPDEBUG(" (pid = %d) (uid = %d) (gid = %d)",
                conn->creds.ucred.pid,
                conn->creds.ucred.uid,
                conn->creds.ucred.gid);
    }
    if (conn->creds.type & CRED_TYPE_SELINUX) {
        GPDEBUG(" (context = %s)", SELINUX_context_str(conn->selinux_ctx));
    }
    GPDEBUG("\n");

    gp_setup_reader(vctx, conn);

    ret = 0;

done:
    if (ret) {
        /* gp_conn_free() closes the fd (if any) and releases conn */
        GPERROR("Error connecting client: (%d:%s)", ret, gp_strerror(ret));
        gp_conn_free(conn);
    }
}
/* Event-loop callback: flush a pending reply buffer to the client socket.
 * On the very first write a 4-byte network-order length header (with the
 * last-fragment bit set) is prepended via a second iovec. Short writes are
 * rescheduled; hard errors drop the connection. */
static void gp_socket_write(verto_ctx *vctx, verto_ev *ev)
{
    struct gp_buffer *wbuf;
    struct iovec iov[2];
    uint32_t size;
    ssize_t wn;
    int vecs;
    int fd;

    fd = verto_get_fd(ev);
    wbuf = verto_get_private(ev);

    vecs = 0;
    if (wbuf->pos == 0) {
        /* first write, send the buffer size as packet header */
        size = wbuf->size | FRAGMENT_BIT;
        size = htonl(size);
        iov[0].iov_base = &size;
        iov[0].iov_len = sizeof(size);
        vecs = 1;
    }
    /* payload continues from wherever the previous short write stopped */
    iov[vecs].iov_base = wbuf->data + wbuf->pos;
    iov[vecs].iov_len = wbuf->size - wbuf->pos;
    vecs++;

    errno = 0;
    wn = writev(fd, iov, vecs);
    if (wn == -1) {
        if (errno == EAGAIN || errno == EINTR) {
            /* try again later */
            gp_socket_schedule_write(vctx, wbuf);
        } else {
            /* error on socket, close and release it */
            gp_conn_free(wbuf->conn);
            gp_buffer_free(wbuf);
        }
        return;
    }
    if (vecs == 2) {
        /* header was included in this writev; wn >= 0 here, so the
         * signed/unsigned comparison below is safe */
        if (wn < sizeof(size)) {
            /* don't bother trying to handle sockets that can't
             * buffer even 4 bytes */
            gp_conn_free(wbuf->conn);
            gp_buffer_free(wbuf);
            return;
        }
        /* count only payload bytes toward the buffer position */
        wn -= sizeof(size);
    }

    wbuf->pos += wn;
    if (wbuf->size > wbuf->pos) {
        /* short write, reschedule */
        gp_socket_schedule_write(vctx, wbuf);
    } else {
        /* now setup again the reader */
        gp_setup_reader(vctx, wbuf->conn);
        /* all done, free write context */
        gp_buffer_free(wbuf);
    }
}
/* Event-loop callback: read an RPC request from the client socket.
 * Protocol: a 4-byte network-order length header with the last-fragment
 * bit set, followed by the payload. The header is read first (allocating
 * the receive buffer), then the payload accumulates across callbacks until
 * complete, at which point the buffer is handed to gp_query_new().
 *
 * Fix: rn was declared size_t while receiving read()'s ssize_t result, so
 * `rn == -1` only worked through implicit wraparound to SIZE_MAX. It is
 * now ssize_t, the type read() actually returns; logic is unchanged. */
static void gp_socket_read(verto_ctx *vctx, verto_ev *ev)
{
    struct gp_buffer *rbuf;
    uint32_t size;
    bool header = false;
    ssize_t rn;
    int ret;
    int fd;

    fd = verto_get_fd(ev);
    rbuf = verto_get_private(ev);

    if (rbuf->data == NULL) {
        header = true;
        /* new connection, need to read length first */
        rn = read(fd, &size, sizeof(uint32_t));
        if (rn == -1) {
            if (errno == EAGAIN || errno == EINTR) {
                /* spin again */
                ret = EAGAIN;
            } else {
                ret = EIO;
            }
            goto done;
        }
        if (rn != sizeof(uint32_t)) {
            /* client closed,
             * or we didn't get even 4 bytes,
             * close conn, not worth trying 1 byte reads at this time */
            ret = EIO;
            goto done;
        }

        /* allocate buffer for receiving data */
        rbuf->size = ntohl(size);

        /* FIXME: need to support multiple fragments */
        /* for now just make sure we have the last fragment bit
         * then remove it */
        if (rbuf->size & FRAGMENT_BIT) {
            rbuf->size &= ~FRAGMENT_BIT;
        } else {
            ret = EIO;
            goto done;
        }

        if (rbuf->size > MAX_RPC_SIZE) {
            /* req too big close conn. */
            ret = EIO;
            goto done;
        }

        rbuf->data = malloc(rbuf->size);
        if (!rbuf->data) {
            ret = ENOMEM;
            goto done;
        }
    }

    errno = 0;
    rn = read(fd, rbuf->data + rbuf->pos, rbuf->size - rbuf->pos);
    if (rn == -1) {
        if (errno == EAGAIN || errno == EINTR) {
            /* spin again */
            ret = EAGAIN;
        } else {
            ret = EIO;
        }
        goto done;
    }
    if (rn == 0) {
        if (!header) {
            /* client closed before the buffer was fully read */
            ret = EIO;
        } else {
            /* header consumed the available bytes this round; retry */
            ret = EAGAIN;
        }
        goto done;
    }

    rbuf->pos += rn;
    if (rbuf->pos == rbuf->size) {
        /* got all data, hand over packet */
        ret = gp_query_new(rbuf->conn->sock_ctx->gpctx->workers,
                           rbuf->conn, rbuf->data, rbuf->size);
        if (ret != 0) {
            /* internal error, not much we can do */
            goto done;
        }

        /* we successfully handed over the data; ownership moved, so
         * clear the pointer before freeing the read context */
        rbuf->data = NULL;
        gp_buffer_free(rbuf);
        return;
    }

    ret = EAGAIN;

done:
    switch (ret) {
    case EAGAIN:
        gp_socket_schedule_read(vctx, rbuf);
        return;
    default:
        gp_conn_free(rbuf->conn);
        gp_buffer_free(rbuf);
    }
}
/* Cleanup callback for the unix-socket event: releases the socket context
 * stored in the event's private data. free(NULL) is a no-op, so a missing
 * context is harmless. */
void free_unix_socket(verto_ctx *ctx, verto_ev *ev)
{
    (void)ctx; /* unused */
    free(verto_get_private(ev));
}