/**
 * @brief Actively poll for the given request to finish
 *
 * @param[in]  req   The request to poll
 *
 * @param[in]  ev    The tevent_context to be used
 *
 * @retval If a critical error has happened in the
 *         tevent loop layer false is returned.
 *         Otherwise true is returned.
 *         This is not the return value of the given request!
 *
 * This function can be used to actively poll for the
 * given request to finish.
 *
 * Note: this should only be used if the given tevent context
 *       was created by the caller, to avoid event loop nesting.
 *
 * This function is typically used by sync wrapper functions.
 */
bool tevent_req_poll(struct tevent_req *req,
		     struct tevent_context *ev)
{
	/*
	 * Spin the event loop one iteration at a time until the
	 * request leaves the "in progress" state.
	 */
	while (tevent_req_is_in_progress(req)) {
		int ret;

		/*
		 * A non-zero return indicates a failure of the event
		 * loop itself; the request's own result is delivered
		 * separately via its _recv function.
		 */
		ret = tevent_loop_once(ev);
		if (ret != 0) {
			return false;
		}
	}

	return true;
}
/*
 * Start an asynchronous writev() on 'sock': a private copy of the
 * caller's 'vector' (count elements) is kept in request state and
 * written out as the socket allows.
 *
 * Returns NULL only if the tevent request itself could not be
 * allocated; every other failure is reported through the returned
 * request (tevent_req_error + tevent_req_post).
 */
struct tevent_req *tsocket_writev_send(struct tsocket_context *sock,
				       TALLOC_CTX *mem_ctx,
				       const struct iovec *vector,
				       size_t count)
{
	struct tevent_req *req;
	struct tsocket_writev_state *state;
	int ret;
	int err;
	bool dummy;
	int to_write = 0;
	size_t i;

	req = tevent_req_create(mem_ctx, &state,
				struct tsocket_writev_state);
	if (!req) {
		return NULL;
	}

	state->caller.sock = sock;
	state->caller.vector = vector;
	state->caller.count = count;

	state->iov = NULL;
	state->count = count;
	state->total_written = 0;

	/*
	 * Take a private copy of the iovec array so the caller's
	 * array stays untouched while the write progresses.
	 */
	state->iov = talloc_array(state, struct iovec, count);
	if (tevent_req_nomem(state->iov, req)) {
		goto post;
	}
	memcpy(state->iov, vector, sizeof(struct iovec) * count);

	/*
	 * Sum the total number of bytes to write, failing with
	 * EMSGSIZE if the running total wraps the 'int' accumulator.
	 * NOTE(review): iov_len is size_t, so a single element larger
	 * than INT_MAX can truncate 'tmp' in a way this wrap check
	 * may not catch — confirm callers never pass such elements.
	 */
	for (i=0; i < count; i++) {
		int tmp = to_write;
		tmp += state->iov[i].iov_len;
		if (tmp < to_write) {
			tevent_req_error(req, EMSGSIZE);
			goto post;
		}
		to_write = tmp;
	}

	/* Zero bytes requested: complete immediately. */
	if (to_write == 0) {
		tevent_req_done(req);
		goto post;
	}

	/*
	 * this is a fast path, not waiting for the
	 * socket to become explicit writeable gains
	 * about 10%-20% performance in benchmark tests.
	 */
	tsocket_writev_handler(sock, req);
	if (!tevent_req_is_in_progress(req)) {
		/* The fast path already finished (or failed) the request. */
		goto post;
	}

	/*
	 * Only register the destructor once we know the request will
	 * stay pending; it presumably unregisters the writeable
	 * handler on teardown — confirm against the destructor body.
	 */
	talloc_set_destructor(state, tsocket_writev_state_destructor);

	/* Continue in the handler whenever the socket becomes writeable. */
	ret = tsocket_set_writeable_handler(sock,
					    tsocket_writev_handler,
					    req);
	err = tsocket_error_from_errno(ret, errno, &dummy);
	if (tevent_req_error(req, err)) {
		/*
		 * Only taken when the mapping produced a real error
		 * (err != 0); on success we fall through and return
		 * the in-flight request.
		 */
		goto post;
	}

	return req;

 post:
	/* Deliver the already-finished result via an immediate event. */
	return tevent_req_post(req, sock->event.ctx);
}
/*
 * put a request into the send queue
 *
 * Converts the caller-built raw SMB2 PDU in req->out into an
 * smb2cli subrequest, optionally registering a pending receiver for
 * unsolicited break PDUs first, then either parks the subrequest in
 * the compound array or submits it right away.  On failure
 * req->state/req->status are set; a submit failure also disconnects
 * the connection.
 */
void smb2_transport_send(struct smb2_request *req)
{
	NTSTATUS status;
	struct smb2_transport *transport = req->transport;
	struct tevent_req **reqs = transport->compound.reqs;
	size_t num_reqs = talloc_array_length(reqs);
	size_t i;
	uint16_t cmd = SVAL(req->out.hdr, SMB2_HDR_OPCODE);
	uint32_t additional_flags = IVAL(req->out.hdr, SMB2_HDR_FLAGS);
	uint32_t clear_flags = 0;
	uint32_t pid = IVAL(req->out.hdr, SMB2_HDR_PID);
	uint32_t tid = IVAL(req->out.hdr, SMB2_HDR_TID);
	struct smbXcli_session *session = NULL;
	bool need_pending_break = false;
	size_t hdr_ofs;
	size_t pdu_len;
	DATA_BLOB body = data_blob_null;
	DATA_BLOB dyn = data_blob_null;
	uint32_t timeout_msec = transport->options.request_timeout * 1000;

	/*
	 * If oplock or lease break handlers are registered we need a
	 * pending receiver for server-initiated SMB2_OP_BREAK PDUs —
	 * but only one such receiver at a time.
	 */
	if (transport->oplock.handler) {
		need_pending_break = true;
	}
	if (transport->lease.handler) {
		need_pending_break = true;
	}
	if (transport->break_subreq) {
		/* a break receiver is already pending */
		need_pending_break = false;
	}

	if (need_pending_break) {
		struct tevent_req *subreq;

		/*
		 * This request is never actually sent; it is parked in
		 * the pending list so incoming break PDUs have a
		 * matching request.  NOTE(review): allocation failure
		 * is silently ignored here (break notifications would
		 * be dropped) — confirm this is intentional.
		 */
		subreq = smb2cli_req_create(transport,
					    transport->ev,
					    transport->conn,
					    SMB2_OP_BREAK,
					    0, /* additional_flags */
					    0, /*clear_flags */
					    0, /* timeout_msec */
					    0, /* pid */
					    0, /* tid */
					    NULL, /* session */
					    NULL, /* body */
					    0, /* body_fixed */
					    NULL, /* dyn */
					    0); /* dyn_len */
		if (subreq != NULL) {
			smbXcli_req_set_pending(subreq);
			tevent_req_set_callback(subreq,
						smb2_transport_break_handler,
						transport);
			transport->break_subreq = subreq;
		}
	}

	if (req->session) {
		session = req->session->smbXcli;
	}

	if (transport->compound.related) {
		/* mark this PDU as part of a related compound chain */
		additional_flags |= SMB2_HDR_FLAG_CHAINED;
	}

	/*
	 * Split the caller-built raw buffer into the fixed-size body
	 * and the trailing dynamic part, as smb2cli_req_create takes
	 * them separately.
	 */
	hdr_ofs = PTR_DIFF(req->out.hdr, req->out.buffer);
	pdu_len = req->out.size - hdr_ofs;
	body.data = req->out.body;
	body.length = req->out.body_fixed;
	dyn.data = req->out.body + req->out.body_fixed;
	dyn.length = pdu_len - (SMB2_HDR_BODY + req->out.body_fixed);

	req->subreq = smb2cli_req_create(req,
					 transport->ev,
					 transport->conn,
					 cmd,
					 additional_flags,
					 clear_flags,
					 timeout_msec,
					 pid,
					 tid,
					 session,
					 body.data, body.length,
					 dyn.data, dyn.length);
	if (req->subreq == NULL) {
		req->state = SMB2_REQUEST_ERROR;
		req->status = NT_STATUS_NO_MEMORY;
		return;
	}
	if (!tevent_req_is_in_progress(req->subreq)) {
		/* creation finished the request early — unexpected */
		req->state = SMB2_REQUEST_ERROR;
		req->status = NT_STATUS_INTERNAL_ERROR;/* TODO */
		return;
	}
	tevent_req_set_callback(req->subreq, smb2_request_done, req);

	smb2cli_req_set_notify_async(req->subreq);
	if (req->credit_charge) {
		smb2cli_req_set_credit_charge(req->subreq,
					      req->credit_charge);
	}

	/* ownership of the PDU has moved into the subrequest */
	ZERO_STRUCT(req->out);
	req->state = SMB2_REQUEST_RECV;

	if (num_reqs > 0) {
		/*
		 * Compound mode: park the subrequest in the first free
		 * slot; submission happens only once the last slot is
		 * filled.  NOTE(review): if no free slot exists the
		 * loop exits with i == num_reqs and this subrequest is
		 * never stored, yet submission proceeds — confirm
		 * callers guarantee a free slot.
		 */
		for (i=0; i < num_reqs; i++) {
			if (reqs[i] != NULL) {
				continue;
			}
			reqs[i] = req->subreq;
			i++;
			break;
		}
		if (i < num_reqs) {
			/* more requests expected before submission */
			return;
		}
	} else {
		/* no compound in progress: submit just this request */
		reqs = &req->subreq;
		num_reqs = 1;
	}
	status = smb2cli_req_compound_submit(reqs, num_reqs);

	TALLOC_FREE(transport->compound.reqs);

	if (!NT_STATUS_IS_OK(status)) {
		req->status = status;
		req->state = SMB2_REQUEST_ERROR;
		smbXcli_conn_disconnect(transport->conn, status);
	}
}