/*
  put a packet in the send queue. When the packet is actually sent,
  call send_callback.

  Useful for operations that must occur after sending a message, such
  as the switch to SASL encryption after a successful LDAP bind reply.
*/
_PUBLIC_ NTSTATUS packet_send_callback(struct packet_context *pc, DATA_BLOB blob,
				       packet_send_callback_fn_t send_callback,
				       void *private_data)
{
	struct send_element *el;
	el = talloc(pc, struct send_element);
	NT_STATUS_HAVE_NO_MEMORY(el);

	DLIST_ADD_END(pc->send_queue, el, struct send_element *);
	el->blob = blob;
	el->nsent = 0;
	el->send_callback = send_callback;
	el->send_callback_private = private_data;

	/* if we aren't going to free the packet then we must reference it
	   to ensure it doesn't disappear before going out */
	if (pc->nofree) {
		if (!talloc_reference(el, blob.data)) {
			return NT_STATUS_NO_MEMORY;
		}
	} else {
		talloc_steal(el, blob.data);
	}

	if (private_data && !talloc_reference(el, private_data)) {
		return NT_STATUS_NO_MEMORY;
	}

	TEVENT_FD_WRITEABLE(pc->fde);

	return NT_STATUS_OK;
}
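/*
  Minimal sketch (not the packet.c implementation) of the write side that
  pairs with packet_send_callback() above: each TEVENT_FD_WRITE event drains
  the head of the send queue, fires the per-packet callback once the blob is
  fully written, and clears write interest when the queue empties.  The
  struct and function names here (example_queue, example_write_handler) are
  hypothetical; TEVENT_FD_NOT_WRITEABLE and the DLIST_* macros are the real
  tevent/Samba primitives, and the header paths are assumptions.
*/
#include <tevent.h>
#include <talloc.h>
#include <unistd.h>
#include <errno.h>
#include "lib/util/dlinklist.h"	/* assumption: Samba's DLIST_* macros */

struct example_element {
	struct example_element *prev, *next;
	uint8_t *data;		/* mirrors el->blob above */
	size_t length;
	size_t nsent;		/* mirrors el->nsent above */
	void (*send_callback)(void *private_data);
	void *send_callback_private;
};

struct example_queue {
	int fd;
	struct tevent_fd *fde;
	struct example_element *send_queue;
};

static void example_write_handler(struct tevent_context *ev, struct tevent_fd *fde,
				  uint16_t flags, void *private_data)
{
	struct example_queue *q = talloc_get_type_abort(private_data,
							struct example_queue);

	while (q->send_queue != NULL) {
		struct example_element *el = q->send_queue;
		ssize_t n = write(q->fd, el->data + el->nsent,
				  el->length - el->nsent);
		if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
			return;		/* socket full again; stay write-armed */
		}
		if (n == -1) {
			return;		/* hard error handling omitted in this sketch */
		}
		el->nsent += n;
		if (el->nsent < el->length) {
			return;		/* partial write; continue on the next event */
		}
		/* packet fully sent: run the callback, then drop the element */
		if (el->send_callback != NULL) {
			el->send_callback(el->send_callback_private);
		}
		DLIST_REMOVE(q->send_queue, el);
		talloc_free(el);
	}

	/* queue drained: stop asking for write events */
	TEVENT_FD_NOT_WRITEABLE(q->fde);
}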
/*
  queue a datagram for send
*/
NTSTATUS nbt_dgram_send(struct nbt_dgram_socket *dgmsock,
			struct nbt_dgram_packet *packet,
			struct socket_address *dest)
{
	struct nbt_dgram_request *req;
	NTSTATUS status = NT_STATUS_NO_MEMORY;
	enum ndr_err_code ndr_err;

	req = talloc(dgmsock, struct nbt_dgram_request);
	if (req == NULL) goto failed;

	req->dest = dest;
	if (talloc_reference(req, dest) == NULL) goto failed;

	ndr_err = ndr_push_struct_blob(&req->encoded, req, packet,
				       (ndr_push_flags_fn_t)ndr_push_nbt_dgram_packet);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		status = ndr_map_error2ntstatus(ndr_err);
		goto failed;
	}

	DLIST_ADD_END(dgmsock->send_queue, req);
	TEVENT_FD_WRITEABLE(dgmsock->fde);
	return NT_STATUS_OK;

failed:
	talloc_free(req);
	return status;
}
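/*
  Sketch of the matching drain step for the datagram queue built above.  This
  is illustrative, not the dgramsocket.c original: the function name is
  hypothetical, the socket_sendto() signature and the dgmsock->sock field are
  assumptions, while send_queue, fde and the request fields come from the
  code above.  Datagrams are sent whole, so each request is removed after one
  attempt, and write interest is dropped once the queue is empty.
*/
static void example_dgram_write_handler(struct nbt_dgram_socket *dgmsock)
{
	while (dgmsock->send_queue != NULL) {
		struct nbt_dgram_request *req = dgmsock->send_queue;
		size_t len = req->encoded.length;
		NTSTATUS status = socket_sendto(dgmsock->sock, &req->encoded,
						&len, req->dest);
		if (NT_STATUS_EQUAL(status, NT_STATUS_WOULD_BLOCK)) {
			return;		/* kernel buffer full; try again later */
		}
		/* sent (or unrecoverable for this datagram): drop the request */
		DLIST_REMOVE(dgmsock->send_queue, req);
		talloc_free(req);
	}

	/* nothing left to send: stop asking for write events */
	TEVENT_FD_NOT_WRITEABLE(dgmsock->fde);
}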
/*
  append data to the connection's output buffer and switch the fde from
  read to write interest so the send path runs; output_pending marks that
  a response is queued
*/
void websrv_output(struct websrv_context *web, const void *data, size_t length)
{
	data_blob_append(web, &web->output.content, data, length);
	TEVENT_FD_NOT_READABLE(web->conn->event.fde);
	TEVENT_FD_WRITEABLE(web->conn->event.fde);
	web->output.output_pending = true;
}
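/*
  Hedged sketch (a hypothetical helper, not taken from web_server.c): once the
  write handler has flushed web->output.content, the toggles done in
  websrv_output() above would typically be reversed so the connection goes
  back to waiting for the next request instead of spinning on write events.
  The nsent field name is an assumption.
*/
static void example_websrv_output_done(struct websrv_context *web)
{
	data_blob_free(&web->output.content);
	web->output.output_pending = false;
	web->output.nsent = 0;		/* assumed name of the send offset field */
	TEVENT_FD_NOT_WRITEABLE(web->conn->event.fde);
	TEVENT_FD_READABLE(web->conn->event.fde);
}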
void sss_cmd_done(struct cli_ctx *cctx, void *freectx)
{
	/* now that the packet is in place, unlock queue
	 * making the event writable */
	TEVENT_FD_WRITEABLE(cctx->cfde);

	/* free all request related data through the talloc hierarchy */
	talloc_free(freectx);
}
/*
  retry backed off messages
*/
static void msg_retry_timer(struct tevent_context *ev, struct tevent_timer *te,
			    struct timeval t, void *private_data)
{
	struct imessaging_context *msg = talloc_get_type(private_data,
							 struct imessaging_context);
	msg->retry_te = NULL;

	/* put the messages back on the main queue */
	while (msg->retry_queue) {
		struct imessaging_rec *rec = msg->retry_queue;
		DLIST_REMOVE(msg->retry_queue, rec);
		DLIST_ADD_END(msg->pending, rec, struct imessaging_rec *);
	}

	TEVENT_FD_WRITEABLE(msg->event.fde);
}
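/*
  Illustrative sketch (not the messaging.c original) of the other half of the
  retry path: when a send from msg->pending keeps failing with EAGAIN, the
  record is parked on msg->retry_queue and a one-shot timer is armed so that
  msg_retry_timer() above can move it back later.  The one-second backoff and
  the exact trigger condition are assumptions; the fields and the
  tevent_add_timer()/timeval_current_ofs() calls match the code above.
*/
static void example_msg_backoff(struct imessaging_context *msg,
				struct imessaging_rec *rec,
				struct tevent_context *ev)
{
	DLIST_REMOVE(msg->pending, rec);
	DLIST_ADD_END(msg->retry_queue, rec, struct imessaging_rec *);

	if (msg->retry_te == NULL) {
		/* fire msg_retry_timer() once, roughly a second from now */
		msg->retry_te = tevent_add_timer(ev, msg,
						 timeval_current_ofs(1, 0),
						 msg_retry_timer, msg);
	}

	if (msg->pending == NULL) {
		/* nothing left that can be sent right now */
		TEVENT_FD_NOT_WRITEABLE(msg->event.fde);
	}
}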
/*
  setup the fd used by the queue
*/
int ctdb_queue_set_fd(struct ctdb_queue *queue, int fd)
{
	queue->fd = fd;
	talloc_free(queue->fde);
	queue->fde = NULL;

	if (fd != -1) {
		queue->fde = tevent_add_fd(queue->ctdb->ev, queue, fd, TEVENT_FD_READ,
					   queue_io_handler, queue);
		if (queue->fde == NULL) {
			return -1;
		}
		tevent_fd_set_auto_close(queue->fde);

		if (queue->out_queue) {
			TEVENT_FD_WRITEABLE(queue->fde);
		}
	}

	return 0;
}
/*
  queue a packet for sending
*/
int ctdb_queue_send(struct ctdb_queue *queue, uint8_t *data, uint32_t length)
{
	struct ctdb_req_header *hdr = (struct ctdb_req_header *)data;
	struct ctdb_queue_pkt *pkt;
	uint32_t length2, full_length;

	if (queue->alignment) {
		/* enforce the length and alignment rules from the tcp packet allocator */
		length2 = (length+(queue->alignment-1)) & ~(queue->alignment-1);
		*(uint32_t *)data = length2;
	} else {
		length2 = length;
	}

	if (length2 != length) {
		memset(data+length, 0, length2-length);
	}

	full_length = length2;

	/* if the queue is empty then try an immediate write, avoiding
	   queue overhead. This relies on non-blocking sockets */
	if (queue->out_queue == NULL && queue->fd != -1 &&
	    !(queue->ctdb->flags & CTDB_FLAG_TORTURE)) {
		ssize_t n = write(queue->fd, data, length2);
		if (n == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
			talloc_free(queue->fde);
			queue->fde = NULL;
			queue->fd = -1;
			tevent_schedule_immediate(queue->im, queue->ctdb->ev,
						  queue_dead, queue);
			/* yes, we report success, as the dead node is
			   handled via a separate event */
			return 0;
		}
		if (n > 0) {
			data += n;
			length2 -= n;
		}
		if (length2 == 0) return 0;
	}

	pkt = talloc_size(queue, offsetof(struct ctdb_queue_pkt, buf) + length2);
	CTDB_NO_MEMORY(queue->ctdb, pkt);
	talloc_set_name_const(pkt, "struct ctdb_queue_pkt");

	pkt->data = pkt->buf;
	memcpy(pkt->data, data, length2);

	pkt->length = length2;
	pkt->full_length = full_length;

	if (queue->out_queue == NULL && queue->fd != -1) {
		TEVENT_FD_WRITEABLE(queue->fde);
	}

	DLIST_ADD_END(queue->out_queue, pkt, NULL);
	queue->out_queue_length++;

	if (queue->ctdb->tunable.verbose_memory_names != 0) {
		switch (hdr->operation) {
		case CTDB_REQ_CONTROL: {
			struct ctdb_req_control_old *c =
				(struct ctdb_req_control_old *)hdr;
			talloc_set_name(pkt, "ctdb_queue_pkt: %s control opcode=%u srvid=%llu datalen=%u",
					queue->name, (unsigned)c->opcode,
					(unsigned long long)c->srvid,
					(unsigned)c->datalen);
			break;
		}
		case CTDB_REQ_MESSAGE: {
			struct ctdb_req_message_old *m =
				(struct ctdb_req_message_old *)hdr;
			talloc_set_name(pkt, "ctdb_queue_pkt: %s message srvid=%llu datalen=%u",
					queue->name,
					(unsigned long long)m->srvid,
					(unsigned)m->datalen);
			break;
		}
		default:
			talloc_set_name(pkt, "ctdb_queue_pkt: %s operation=%u length=%u src=%u dest=%u",
					queue->name, (unsigned)hdr->operation,
					(unsigned)hdr->length,
					(unsigned)hdr->srcnode,
					(unsigned)hdr->destnode);
			break;
		}
	}

	return 0;
}
krb5_error_code smb_krb5_send_and_recv_func(krb5_context context,
					    void *data,
					    krb5_krbhst_info *hi,
					    time_t timeout,
					    const krb5_data *send_buf,
					    krb5_data *recv_buf)
{
	krb5_error_code ret;
	NTSTATUS status;
	struct socket_address *remote_addr;
	const char *name;
	struct addrinfo *ai, *a;
	struct smb_krb5_socket *smb_krb5;

	struct tevent_context *ev = talloc_get_type(data, struct tevent_context);

	DATA_BLOB send_blob = data_blob_const(send_buf->data, send_buf->length);

	ret = krb5_krbhst_get_addrinfo(context, hi, &ai);
	if (ret) {
		return ret;
	}

	for (a = ai; a; a = a->ai_next) {
		smb_krb5 = talloc(NULL, struct smb_krb5_socket);
		if (!smb_krb5) {
			return ENOMEM;
		}
		smb_krb5->hi = hi;

		switch (a->ai_family) {
		case PF_INET:
			name = "ipv4";
			break;
#ifdef HAVE_IPV6
		case PF_INET6:
			name = "ipv6";
			break;
#endif
		default:
			talloc_free(smb_krb5);
			return EINVAL;
		}

		status = NT_STATUS_INVALID_PARAMETER;
		switch (hi->proto) {
		case KRB5_KRBHST_UDP:
			status = socket_create(name, SOCKET_TYPE_DGRAM, &smb_krb5->sock, 0);
			break;
		case KRB5_KRBHST_TCP:
			status = socket_create(name, SOCKET_TYPE_STREAM, &smb_krb5->sock, 0);
			break;
		case KRB5_KRBHST_HTTP:
			talloc_free(smb_krb5);
			return EINVAL;
		}
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(smb_krb5);
			continue;
		}

		talloc_steal(smb_krb5, smb_krb5->sock);

		remote_addr = socket_address_from_sockaddr(smb_krb5, a->ai_addr, a->ai_addrlen);
		if (!remote_addr) {
			talloc_free(smb_krb5);
			continue;
		}

		status = socket_connect_ev(smb_krb5->sock, NULL, remote_addr, 0, ev);
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(smb_krb5);
			continue;
		}
		talloc_free(remote_addr);

		/* Setup the FDE, start listening for read events
		 * from the start (otherwise we may miss a socket
		 * drop) and mark as AUTOCLOSE along with the fde */

		/* This is equivalent to EVENT_FD_READABLE(smb_krb5->fde) */
		smb_krb5->fde = tevent_add_fd(ev, smb_krb5->sock,
					      socket_get_fd(smb_krb5->sock),
					      TEVENT_FD_READ,
					      smb_krb5_socket_handler, smb_krb5);
		/* it's now the job of the event layer to close the socket */
		tevent_fd_set_close_fn(smb_krb5->fde, socket_tevent_fd_close_fn);
		socket_set_flags(smb_krb5->sock, SOCKET_FLAG_NOCLOSE);

		tevent_add_timer(ev, smb_krb5,
				 timeval_current_ofs(timeout, 0),
				 smb_krb5_request_timeout, smb_krb5);

		smb_krb5->status = NT_STATUS_OK;
		smb_krb5->reply = data_blob(NULL, 0);

		switch (hi->proto) {
		case KRB5_KRBHST_UDP:
			TEVENT_FD_WRITEABLE(smb_krb5->fde);
			smb_krb5->request = send_blob;
			break;
		case KRB5_KRBHST_TCP:
			smb_krb5->packet = packet_init(smb_krb5);
			if (smb_krb5->packet == NULL) {
				talloc_free(smb_krb5);
				return ENOMEM;
			}
			packet_set_private(smb_krb5->packet, smb_krb5);
			packet_set_socket(smb_krb5->packet, smb_krb5->sock);
			packet_set_callback(smb_krb5->packet, smb_krb5_full_packet);
			packet_set_full_request(smb_krb5->packet, packet_full_request_u32);
			packet_set_error_handler(smb_krb5->packet, smb_krb5_error_handler);
			packet_set_event_context(smb_krb5->packet, ev);
			packet_set_fde(smb_krb5->packet, smb_krb5->fde);

			smb_krb5->request = data_blob_talloc(smb_krb5, NULL, send_blob.length + 4);
			RSIVAL(smb_krb5->request.data, 0, send_blob.length);
			memcpy(smb_krb5->request.data+4, send_blob.data, send_blob.length);
			packet_send(smb_krb5->packet, smb_krb5->request);
			break;
		case KRB5_KRBHST_HTTP:
			talloc_free(smb_krb5);
			return EINVAL;
		}

		while ((NT_STATUS_IS_OK(smb_krb5->status)) && !smb_krb5->reply.length) {
			if (tevent_loop_once(ev) != 0) {
				talloc_free(smb_krb5);
				return EINVAL;
			}
		}

		if (NT_STATUS_EQUAL(smb_krb5->status, NT_STATUS_IO_TIMEOUT)) {
			talloc_free(smb_krb5);
			continue;
		}

		if (!NT_STATUS_IS_OK(smb_krb5->status)) {
			DEBUG(2,("Error reading smb_krb5 reply packet: %s\n",
				 nt_errstr(smb_krb5->status)));
			talloc_free(smb_krb5);
			continue;
		}

		ret = krb5_data_copy(recv_buf, smb_krb5->reply.data, smb_krb5->reply.length);
		if (ret) {
			talloc_free(smb_krb5);
			return ret;
		}
		talloc_free(smb_krb5);

		break;
	}
	if (a) {
		return 0;
	}
	return KRB5_KDC_UNREACH;
}
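/*
  Hedged sketch of what happens once the UDP fde above becomes writable.  The
  real logic lives in smb_krb5_socket_handler(), which is not shown here; this
  hypothetical helper only illustrates the pattern, and the socket_send()
  signature is an assumption.  The datagram is handed to the kernel in one go
  and write interest is dropped, leaving only the read side armed for the
  KDC's reply.
*/
static void example_krb5_udp_send(struct smb_krb5_socket *smb_krb5)
{
	size_t sent = 0;
	NTSTATUS status = socket_send(smb_krb5->sock, &smb_krb5->request, &sent);

	if (NT_STATUS_IS_ERR(status)) {
		smb_krb5->status = status;	/* let the outer loop give up on this KDC */
		return;
	}

	/* datagram sent: stop asking for write events */
	TEVENT_FD_NOT_WRITEABLE(smb_krb5->fde);
}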
krb5_error_code smb_krb5_send_and_recv_func(krb5_context context,
					    void *data,
					    krb5_krbhst_info *hi,
					    time_t timeout,
					    const krb5_data *send_buf,
					    krb5_data *recv_buf)
{
	krb5_error_code ret;
	NTSTATUS status;
	const char *name;
	struct addrinfo *ai, *a;
	struct smb_krb5_socket *smb_krb5;
	DATA_BLOB send_blob;

	struct tevent_context *ev;
	TALLOC_CTX *tmp_ctx = talloc_new(NULL);
	if (!tmp_ctx) {
		return ENOMEM;
	}

	if (!data) {
		/* If no event context was available, then create one for this loop */
		ev = samba_tevent_context_init(tmp_ctx);
		if (!ev) {
			talloc_free(tmp_ctx);
			return ENOMEM;
		}
	} else {
		ev = talloc_get_type_abort(data, struct tevent_context);
	}

	send_blob = data_blob_const(send_buf->data, send_buf->length);

	ret = krb5_krbhst_get_addrinfo(context, hi, &ai);
	if (ret) {
		talloc_free(tmp_ctx);
		return ret;
	}

	for (a = ai; a; a = a->ai_next) {
		struct socket_address *remote_addr;
		smb_krb5 = talloc(tmp_ctx, struct smb_krb5_socket);
		if (!smb_krb5) {
			talloc_free(tmp_ctx);
			return ENOMEM;
		}
		smb_krb5->hi = hi;

		switch (a->ai_family) {
		case PF_INET:
			name = "ipv4";
			break;
#ifdef HAVE_IPV6
		case PF_INET6:
			name = "ipv6";
			break;
#endif
		default:
			talloc_free(tmp_ctx);
			return EINVAL;
		}

		status = NT_STATUS_INVALID_PARAMETER;
		switch (hi->proto) {
		case KRB5_KRBHST_UDP:
			status = socket_create(name, SOCKET_TYPE_DGRAM, &smb_krb5->sock, 0);
			break;
		case KRB5_KRBHST_TCP:
			status = socket_create(name, SOCKET_TYPE_STREAM, &smb_krb5->sock, 0);
			break;
		case KRB5_KRBHST_HTTP:
			talloc_free(tmp_ctx);
			return EINVAL;
		}
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(smb_krb5);
			continue;
		}

		talloc_steal(smb_krb5, smb_krb5->sock);

		remote_addr = socket_address_from_sockaddr(smb_krb5, a->ai_addr, a->ai_addrlen);
		if (!remote_addr) {
			talloc_free(smb_krb5);
			continue;
		}

		status = socket_connect_ev(smb_krb5->sock, NULL, remote_addr, 0, ev);
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(smb_krb5);
			continue;
		}

		/* Setup the FDE, start listening for read events
		 * from the start (otherwise we may miss a socket
		 * drop) and mark as AUTOCLOSE along with the fde */

		/* This is equivalent to EVENT_FD_READABLE(smb_krb5->fde) */
		smb_krb5->fde = tevent_add_fd(ev, smb_krb5->sock,
					      socket_get_fd(smb_krb5->sock),
					      TEVENT_FD_READ,
					      smb_krb5_socket_handler, smb_krb5);
		/* it's now the job of the event layer to close the socket */
		tevent_fd_set_close_fn(smb_krb5->fde, socket_tevent_fd_close_fn);
		socket_set_flags(smb_krb5->sock, SOCKET_FLAG_NOCLOSE);

		tevent_add_timer(ev, smb_krb5,
				 timeval_current_ofs(timeout, 0),
				 smb_krb5_request_timeout, smb_krb5);

		smb_krb5->status = NT_STATUS_OK;
		smb_krb5->reply = data_blob(NULL, 0);

		switch (hi->proto) {
		case KRB5_KRBHST_UDP:
			TEVENT_FD_WRITEABLE(smb_krb5->fde);
			smb_krb5->request = send_blob;
			break;
		case KRB5_KRBHST_TCP:
			smb_krb5->packet = packet_init(smb_krb5);
			if (smb_krb5->packet == NULL) {
				talloc_free(smb_krb5);
				return ENOMEM;
			}
			packet_set_private(smb_krb5->packet, smb_krb5);
			packet_set_socket(smb_krb5->packet, smb_krb5->sock);
			packet_set_callback(smb_krb5->packet, smb_krb5_full_packet);
			packet_set_full_request(smb_krb5->packet, packet_full_request_u32);
			packet_set_error_handler(smb_krb5->packet, smb_krb5_error_handler);
			packet_set_event_context(smb_krb5->packet, ev);
			packet_set_fde(smb_krb5->packet, smb_krb5->fde);

			smb_krb5->request = data_blob_talloc(smb_krb5, NULL, send_blob.length + 4);
			RSIVAL(smb_krb5->request.data, 0, send_blob.length);
			memcpy(smb_krb5->request.data+4, send_blob.data, send_blob.length);
			packet_send(smb_krb5->packet, smb_krb5->request);
			break;
		case KRB5_KRBHST_HTTP:
			talloc_free(tmp_ctx);
			return EINVAL;
		}

		while ((NT_STATUS_IS_OK(smb_krb5->status)) && !smb_krb5->reply.length) {
			if (tevent_loop_once(ev) != 0) {
				talloc_free(tmp_ctx);
				return EINVAL;
			}

			/* After each and every event loop, reset the
			 * send_to_kdc pointers to what they were when
			 * we entered this loop.  That way, if a
			 * nested event has invalidated them, we put
			 * it back before we return to the heimdal
			 * code */
			ret = krb5_set_send_to_kdc_func(context,
							smb_krb5_send_and_recv_func,
							data);
			if (ret != 0) {
				talloc_free(tmp_ctx);
				return ret;
			}
		}

		if (NT_STATUS_EQUAL(smb_krb5->status, NT_STATUS_IO_TIMEOUT)) {
			talloc_free(smb_krb5);
			continue;
		}

		if (!NT_STATUS_IS_OK(smb_krb5->status)) {
			struct tsocket_address *addr =
				socket_address_to_tsocket_address(smb_krb5, remote_addr);
			const char *addr_string = NULL;
			if (addr) {
				addr_string = tsocket_address_inet_addr_string(addr, smb_krb5);
			} else {
				addr_string = NULL;
			}
			DEBUG(2,("Error reading smb_krb5 reply packet: %s from %s\n",
				 nt_errstr(smb_krb5->status), addr_string));
			talloc_free(smb_krb5);
			continue;
		}

		ret = krb5_data_copy(recv_buf, smb_krb5->reply.data, smb_krb5->reply.length);
		if (ret) {
			talloc_free(tmp_ctx);
			return ret;
		}
		talloc_free(smb_krb5);

		break;
	}
	talloc_free(tmp_ctx);
	if (a) {
		return 0;
	}
	return KRB5_KDC_UNREACH;
}