static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * the client to handle responses from the server.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags, instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags, instance);
}
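/*
 * Illustrative userspace sketch, not part of the CIFS code above: for SMB2+
 * the framing done by __smb_send_rqst() amounts to writing a 4-byte RFC 1002
 * length marker in network byte order and corking the socket around the
 * individual writes so that marker and payload leave the host as one stream.
 * The helper name send_rfc1002_framed() is hypothetical; only htonl(),
 * setsockopt() and send() are real userspace APIs, and a real implementation
 * would loop on short sends the way smb_send_kvec() does.
 */
#include <arpa/inet.h>		/* htonl() */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <netinet/tcp.h>	/* TCP_CORK (Linux-specific) */
#include <stdint.h>
#include <sys/socket.h>		/* setsockopt(), send() */
#include <sys/types.h>

static ssize_t send_rfc1002_framed(int fd, const void *payload, uint32_t len)
{
	uint32_t marker = htonl(len);	/* same value cpu_to_be32() yields */
	int val = 1;
	ssize_t rc;

	/* cork the socket: coalesce marker and payload into full segments */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &val, sizeof(val));

	rc = send(fd, &marker, sizeof(marker), 0);
	if (rc == (ssize_t)sizeof(marker))
		rc = send(fd, payload, len, 0);

	/* uncork: flush whatever is still queued */
	val = 0;
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &val, sizeof(val));

	return rc;
}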
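/*
 * A second sketch, again hypothetical userspace code rather than the CIFS
 * implementation: __smb_send_rqst() blocks every signal for the duration of
 * the transfer and only reports -EINTR afterwards if the packet was left
 * incomplete, so a fully sent request keeps its mid entry alive. The same
 * bracket can be written with sigprocmask() around a plain send loop
 * (pthread_sigmask() would be the choice in a threaded program).
 */
#include <signal.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t send_all_signals_blocked(int fd, const char *buf, size_t len)
{
	sigset_t mask, oldmask;
	size_t done = 0;
	ssize_t rc;

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);	/* hold off all signals */

	while (done < len) {
		rc = send(fd, buf + done, len - done, 0);
		if (rc < 0)
			break;
		done += (size_t)rc;
	}

	sigprocmask(SIG_SETMASK, &oldmask, NULL);	/* restore caller's mask */

	/* as above: report success if everything went out, even if a signal
	 * is now pending; a partial send is an error either way */
	return (done == len) ? (ssize_t)len : -1;
}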