Example #1
File: rxrpc.c Project: 020gzh/linux
/*
 * send an empty reply
 */
void afs_send_empty_reply(struct afs_call *call)
{
	struct msghdr msg;

	_enter("");

	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= 0;

	call->state = AFS_CALL_AWAIT_ACK;
	switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) {
	case 0:
		_leave(" [replied]");
		return;

	case -ENOMEM:
		_debug("oom");
		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
	default:
		afs_end_call(call);
		_leave(" [error]");
		return;
	}
}
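
Every example in this collection follows the same basic shape: wrap one or more kernel buffers in struct kvec entries, bind them to a msghdr with iov_iter_kvec(), and hand the msghdr to a send or receive routine. Below is a minimal sketch of that pattern; example_send_buf is a hypothetical helper, not from any of the projects above, and it assumes the pre-4.20 iov_iter_kvec() signature (direction ORed with ITER_KVEC) that most of these snippets use. Examples #11 and #17 use the post-4.20 form that drops the ITER_KVEC flag.

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Hypothetical helper: send one kernel buffer over a kernel socket. */
static int example_send_buf(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = 0 };

	/* bind the kvec to the message's iterator, then send it */
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, len);
	return sock_sendmsg(sock, &msg);
}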
Example #2
File: rxrpc.c Project: 020gzh/linux
/*
 * send a simple reply
 */
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
	struct msghdr msg;
	struct kvec iov[1];
	int n;

	_enter("");

	iov[0].iov_base		= (void *) buf;
	iov[0].iov_len		= len;
	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= 0;

	call->state = AFS_CALL_AWAIT_ACK;
	n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
	if (n >= 0) {
		/* Success */
		_leave(" [replied]");
		return;
	}

	if (n == -ENOMEM) {
		_debug("oom");
		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
	}
	afs_end_call(call);
	_leave(" [error]");
}
Example #3
File: rxrpc.c Project: krzk/linux
/*
 * send an empty reply
 */
void afs_send_empty_reply(struct afs_call *call)
{
	struct afs_net *net = call->net;
	struct msghdr msg;

	_enter("");

	rxrpc_kernel_set_tx_length(net->socket, call->rxcall, 0);

	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= 0;

	switch (rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, 0,
				       afs_notify_end_reply_tx)) {
	case 0:
		_leave(" [replied]");
		return;

	case -ENOMEM:
		_debug("oom");
		rxrpc_kernel_abort_call(net->socket, call->rxcall,
					RX_USER_ABORT, -ENOMEM, "KOO");
	default:
		_leave(" [error]");
		return;
	}
}
Example #4
File: rxrpc.c Project: krzk/linux
/*
 * send a simple reply
 */
void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
	struct afs_net *net = call->net;
	struct msghdr msg;
	struct kvec iov[1];
	int n;

	_enter("");

	rxrpc_kernel_set_tx_length(net->socket, call->rxcall, len);

	iov[0].iov_base		= (void *) buf;
	iov[0].iov_len		= len;
	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= 0;

	n = rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, len,
				   afs_notify_end_reply_tx);
	if (n >= 0) {
		/* Success */
		_leave(" [replied]");
		return;
	}

	if (n == -ENOMEM) {
		_debug("oom");
		rxrpc_kernel_abort_call(net->socket, call->rxcall,
					RX_USER_ABORT, -ENOMEM, "KOO");
	}
	_leave(" [error]");
}
Example #5
ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
			   void *buffer, size_t buffer_size)
{
	ssize_t retval;
	u64 attr_size;
	struct p9_fid *attr_fid;
	struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size};
	struct iov_iter to;
	int err;

	iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size);

	attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
	if (IS_ERR(attr_fid)) {
		retval = PTR_ERR(attr_fid);
		p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n",
			 retval);
		return retval;
	}
	if (attr_size > buffer_size) {
		if (!buffer_size) /* request to get the attr_size */
			retval = attr_size;
		else
			retval = -ERANGE;
	} else {
		iov_iter_truncate(&to, attr_size);
		retval = p9_client_read(attr_fid, 0, &to, &err);
		if (err)
			retval = err;
	}
	p9_client_clunk(attr_fid);
	return retval;
}
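
The buffer_size == 0 branch above lets a caller size the attribute before fetching it. A hypothetical two-pass caller might look like the sketch below (example_get_xattr and its allocation/error handling are illustrative, not part of the 9p code). Note that the attribute can grow between the two calls, which is exactly the case the real function reports as -ERANGE.

#include <linux/slab.h>
#include <net/9p/client.h>

/* Hypothetical caller: probe the attribute size, then read it. */
static ssize_t example_get_xattr(struct p9_fid *fid, const char *name,
				 void **bufp)
{
	ssize_t size, ret;
	void *buf;

	size = v9fs_fid_xattr_get(fid, name, NULL, 0);	/* size query */
	if (size <= 0)
		return size;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = v9fs_fid_xattr_get(fid, name, buf, size);
	if (ret < 0)
		kfree(buf);
	else
		*bufp = buf;
	return ret;
}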
Example #6
static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
{
	bool over;
	struct p9_wstat st;
	int err = 0;
	struct p9_fid *fid;
	int buflen;
	int reclen = 0;
	struct p9_rdir *rdir;
	struct kvec kvec;

	p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
	fid = file->private_data;

	buflen = fid->clnt->msize - P9_IOHDRSZ;

	rdir = v9fs_alloc_rdir_buf(file, buflen);
	if (!rdir)
		return -ENOMEM;
	kvec.iov_base = rdir->buf;
	kvec.iov_len = buflen;

	while (1) {
		if (rdir->tail == rdir->head) {
			struct iov_iter to;
			int n;
			iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen);
			n = p9_client_read(file->private_data, ctx->pos, &to,
					   &err);
			if (err)
				return err;

			rdir->head = 0;
			rdir->tail = n;
		}
		while (rdir->head < rdir->tail) {
			p9stat_init(&st);
			err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
					  rdir->tail - rdir->head, &st);
			if (err) {
				p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
				p9stat_free(&st);
				return -EIO;
			}
			reclen = st.size+2;

			over = !dir_emit(ctx, st.name, strlen(st.name),
					 v9fs_qid2ino(&st.qid), dt_type(&st));
			p9stat_free(&st);
			if (over)
				return 0;

			rdir->head += reclen;
			ctx->pos += reclen;
		}
	}
}
Example #7
int
lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
{
	int rc;
	long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
	unsigned long then;
	struct timeval tv;
	struct kvec  iov = { .iov_base = buffer, .iov_len  = nob };
	struct msghdr msg = {NULL,};

	LASSERT(nob > 0);
	/*
	 * Caller may pass a zero timeout if she thinks the socket buffer is
	 * empty enough to take the whole message immediately
	 */
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, nob);
	for (;;) {
		msg.msg_flags = !timeout ? MSG_DONTWAIT : 0;
		if (timeout) {
			/* Set send timeout to remaining time */
			jiffies_to_timeval(jiffies_left, &tv);
			rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
					       (char *)&tv, sizeof(tv));
			if (rc) {
				CERROR("Can't set socket send timeout %ld.%06d: %d\n",
				       (long)tv.tv_sec, (int)tv.tv_usec, rc);
				return rc;
			}
		}

		then = jiffies;
		rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
		jiffies_left -= jiffies - then;

		if (rc < 0)
			return rc;

		if (!rc) {
			CERROR("Unexpected zero rc\n");
			return -ECONNABORTED;
		}

		if (!msg_data_left(&msg))
			break;

		if (jiffies_left <= 0)
			return -EAGAIN;
	}
	return 0;
}
Example #8
/* Receive data over TCP/IP. */
int usbip_recv(struct socket *sock, void *buf, int size)
{
	int result;
	struct kvec iov = {.iov_base = buf, .iov_len = size};
	struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
	int total = 0;

	iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);

	usbip_dbg_xmit("enter\n");

	if (!sock || !buf || !size) {
		pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
		       size);
		return -EINVAL;
	}

	do {
		int sz = msg_data_left(&msg);
		sock->sk->sk_allocation = GFP_NOIO;

		result = sock_recvmsg(sock, &msg, MSG_WAITALL);
		if (result <= 0) {
			pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
				 sock, buf + total, sz, result, total);
			goto err;
		}

		total += result;
	} while (msg_data_left(&msg));

	if (usbip_dbg_flag_xmit) {
		if (!in_interrupt())
			pr_debug("%-10s:", current->comm);
		else
			pr_debug("interrupt  :");

		pr_debug("receiving....\n");
		usbip_dump_buffer(buf, size);
		pr_debug("received, osize %d ret %d size %zd total %d\n",
			 size, result, msg_data_left(&msg), total);
	}

	return total;

err:
	return result;
}
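
The msg_data_left()-driven loop is what makes this receive reliable: the iterator advances as bytes are copied in, so each retry resumes exactly where the previous call stopped. Distilled into a hypothetical read-exactly helper (same pre-4.20 iov_iter_kvec() signature as the function above):

#include <linux/errno.h>
#include <linux/net.h>
#include <linux/uio.h>

/* Hypothetical helper: loop until exactly `size` bytes have arrived. */
static int example_recv_exact(struct socket *sock, void *buf, int size)
{
	struct kvec iov = { .iov_base = buf, .iov_len = size };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int ret;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);

	while (msg_data_left(&msg)) {
		ret = sock_recvmsg(sock, &msg, MSG_WAITALL);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -ECONNRESET;	/* peer closed early */
	}
	return size;
}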
Example #9
int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
		   const void *value, size_t value_len, int flags)
{
	struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
	struct iov_iter from;
	int retval;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);

	p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
		 name, value_len, flags);

	/* Clone it */
	fid = p9_client_walk(fid, 0, NULL, 1);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/*
	 * On success fid points to xattr
	 */
	retval = p9_client_xattrcreate(fid, name, value_len, flags);
	if (retval < 0)
		p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
			 retval);
	else
		p9_client_write(fid, 0, &from, &retval);
	p9_client_clunk(fid);
	return retval;
}

ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
}

const struct xattr_handler *v9fs_xattr_handlers[] = {
	&v9fs_xattr_user_handler,
	&v9fs_xattr_trusted_handler,
#ifdef CONFIG_9P_FS_POSIX_ACL
	&v9fs_xattr_acl_access_handler,
	&v9fs_xattr_acl_default_handler,
#endif
#ifdef CONFIG_9P_FS_SECURITY
	&v9fs_xattr_security_handler,
#endif
	NULL
};
Example #10
int
ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
	struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
	struct socket *sock = conn->ksnc_sock;
	int nob, i;

	if (*ksocknal_tunables.ksnd_enable_csum	&& /* checksum enabled */
	    conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection  */
	    tx->tx_nob == tx->tx_resid		 && /* first sending    */
	    !tx->tx_msg.ksm_csum)		     /* not checksummed  */
		ksocknal_lib_csum_tx(tx);

	for (nob = i = 0; i < tx->tx_niov; i++)
		nob += tx->tx_iov[i].iov_len;

	if (!list_empty(&conn->ksnc_tx_queue) ||
	    nob < tx->tx_resid)
		msg.msg_flags |= MSG_MORE;

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
		      tx->tx_iov, tx->tx_niov, nob);
	return sock_sendmsg(sock, &msg);
}
Example #11
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
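
The TCP_CORK bracketing in __smb_send_rqst() is what coalesces the RFC 1002 marker, the header kvecs, and the data pages into as few TCP segments as possible. Reduced to its essentials in the hypothetical sketch below; kernel_setsockopt() is the pre-5.8 API this example itself relies on, and the post-4.20 iov_iter_kvec() signature matches the one used above.

#include <linux/net.h>
#include <linux/tcp.h>
#include <linux/uio.h>

/* Sketch of the cork/uncork pattern: queue several writes, then flush. */
static int example_corked_send(struct socket *sock, struct kvec *vecs,
			       int nvec, size_t total)
{
	struct msghdr msg = { .msg_flags = 0 };
	int val = 1, rc;

	/* cork: hold back partial frames while data is queued */
	kernel_setsockopt(sock, SOL_TCP, TCP_CORK, (char *)&val, sizeof(val));

	iov_iter_kvec(&msg.msg_iter, WRITE, vecs, nvec, total);
	rc = sock_sendmsg(sock, &msg);

	/* uncork: push out whatever is queued */
	val = 0;
	kernel_setsockopt(sock, SOL_TCP, TCP_CORK, (char *)&val, sizeof(val));
	return rc;
}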

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}
Example #12
File: rxrpc.c Project: 020gzh/linux
/*
 * initiate a call
 */
int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
		  const struct afs_wait_mode *wait_mode)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_call *rxcall;
	struct msghdr msg;
	struct kvec iov[1];
	int ret;
	struct sk_buff *skb;

	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));

	ASSERT(call->type != NULL);
	ASSERT(call->type->name != NULL);

	_debug("____MAKE %p{%s,%x} [%d]____",
	       call, call->type->name, key_serial(call->key),
	       atomic_read(&afs_outstanding_calls));

	call->wait_mode = wait_mode;
	call->async_workfn = afs_process_async_call;
	INIT_WORK(&call->async_work, afs_async_workfn);

	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.srx_service = call->service_id;
	srx.transport_type = SOCK_DGRAM;
	srx.transport_len = sizeof(srx.transport.sin);
	srx.transport.sin.sin_family = AF_INET;
	srx.transport.sin.sin_port = call->port;
	memcpy(&srx.transport.sin.sin_addr, addr, 4);

	/* create a call */
	rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
					 (unsigned long) call, gfp);
	call->key = NULL;
	if (IS_ERR(rxcall)) {
		ret = PTR_ERR(rxcall);
		goto error_kill_call;
	}

	call->rxcall = rxcall;

	/* send the request */
	iov[0].iov_base	= call->request;
	iov[0].iov_len	= call->request_size;

	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
		      call->request_size);
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= (call->send_pages ? MSG_MORE : 0);

	/* have to change the state *before* sending the last packet as RxRPC
	 * might give us the reply before it returns from sending the
	 * request */
	if (!call->send_pages)
		call->state = AFS_CALL_AWAIT_REPLY;
	ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
	if (ret < 0)
		goto error_do_abort;

	if (call->send_pages) {
		ret = afs_send_pages(call, &msg, iov);
		if (ret < 0)
			goto error_do_abort;
	}

	/* at this point, an async call may no longer exist as it may have
	 * already completed */
	return wait_mode->wait(call);

error_do_abort:
	rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
	while ((skb = skb_dequeue(&call->rx_queue)))
		afs_free_skb(skb);
error_kill_call:
	afs_end_call(call);
	_leave(" = %d", ret);
	return ret;
}
Example #13
File: rxrpc.c Project: 020gzh/linux
/*
 * attach the data from a bunch of pages on an inode to a call
 */
static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
			  struct kvec *iov)
{
	struct page *pages[8];
	unsigned count, n, loop, offset, to;
	pgoff_t first = call->first, last = call->last;
	int ret;

	_enter("");

	offset = call->first_offset;
	call->first_offset = 0;

	do {
		_debug("attach %lx-%lx", first, last);

		count = last - first + 1;
		if (count > ARRAY_SIZE(pages))
			count = ARRAY_SIZE(pages);
		n = find_get_pages_contig(call->mapping, first, count, pages);
		ASSERTCMP(n, ==, count);

		loop = 0;
		do {
			msg->msg_flags = 0;
			to = PAGE_SIZE;
			if (first + loop >= last)
				to = call->last_to;
			else
				msg->msg_flags = MSG_MORE;
			iov->iov_base = kmap(pages[loop]) + offset;
			iov->iov_len = to - offset;

			_debug("- range %u-%u%s",
			       offset, to, msg->msg_flags ? " [more]" : "");
			iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC,
				      iov, 1, iov->iov_len);

			/* have to change the state *before* sending the last
			 * packet as RxRPC might give us the reply before it
			 * returns from sending the request */
			if (first + loop >= last)
				call->state = AFS_CALL_AWAIT_REPLY;
			ret = rxrpc_kernel_send_data(call->rxcall, msg,
						     iov->iov_len);
			kunmap(pages[loop]);
			offset = 0;
			if (ret < 0)
				break;
		} while (++loop < count);
		first += count;

		for (loop = 0; loop < count; loop++)
			put_page(pages[loop]);
		if (ret < 0)
			break;
	} while (first <= last);

	_leave(" = %d", ret);
	return ret;
}
Example #14
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
			send_length, smb_buf_length);
		return -EIO;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	size = 0;
	for (i = 0; i < n_vec; i++)
		size += iov[i].iov_len;

	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	rc = smb_send_kvec(server, &smb_msg, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		size_t len = i == rqst->rq_npages - 1
				? rqst->rq_tailsz
				: rqst->rq_pagesz;
		struct bio_vec bvec = {
			.bv_page = rqst->rq_pages[i],
			.bv_len = len
		};
		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, len);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
Example #15
/**
 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
 * @sock: The socket that the call exists on
 * @call: The call to send data through
 * @buf: The buffer to receive into
 * @size: The size of the buffer, including data already read
 * @_offset: The running offset into the buffer.
 * @want_more: True if more data is expected to be read
 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
 *
 * Allow a kernel service to receive data and pick up information about the
 * state of a call.  Returns 0 if got what was asked for and there's more
 * available, 1 if we got what was asked for and we're at the end of the data
 * and -EAGAIN if we need more data.
 *
 * Note that we may return -EAGAIN to drain empty packets at the end of the
 * data, even if we've already copied over the requested data.
 *
 * This function adds the amount it transfers to *_offset, so this should be
 * precleared as appropriate.  Note that the amount remaining in the buffer is
 * taken to be size - *_offset.
 *
 * *_abort should also be initialised to 0.
 */
int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
                           void *buf, size_t size, size_t *_offset,
                           bool want_more, u32 *_abort)
{
    struct iov_iter iter;
    struct kvec iov;
    int ret;

    _enter("{%d,%s},%zu/%zu,%d",
           call->debug_id, rxrpc_call_states[call->state],
           *_offset, size, want_more);

    ASSERTCMP(*_offset, <=, size);
    ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);

    iov.iov_base = buf + *_offset;
    iov.iov_len = size - *_offset;
    iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, size - *_offset);

    lock_sock(sock->sk);

    switch (call->state) {
    case RXRPC_CALL_CLIENT_RECV_REPLY:
    case RXRPC_CALL_SERVER_RECV_REQUEST:
    case RXRPC_CALL_SERVER_ACK_REQUEST:
        ret = rxrpc_recvmsg_data(sock, call, NULL, &iter, size, 0,
                                 _offset);
        if (ret < 0)
            goto out;

        /* We can only reach here with a partially full buffer if we
         * have reached the end of the data.  We must otherwise have a
         * full buffer or have been given -EAGAIN.
         */
        if (ret == 1) {
            if (*_offset < size)
                goto short_data;
            if (!want_more)
                goto read_phase_complete;
            ret = 0;
            goto out;
        }

        if (!want_more)
            goto excess_data;
        goto out;

    case RXRPC_CALL_COMPLETE:
        goto call_complete;

    default:
        ret = -EINPROGRESS;
        goto out;
    }

read_phase_complete:
    ret = 1;
out:
    release_sock(sock->sk);
    _leave(" = %d [%zu,%d]", ret, *_offset, *_abort);
    return ret;

short_data:
    ret = -EBADMSG;
    goto out;
excess_data:
    ret = -EMSGSIZE;
    goto out;
call_complete:
    *_abort = call->abort_code;
    ret = call->error;
    if (call->completion == RXRPC_CALL_SUCCEEDED) {
        ret = 1;
        if (size > 0)
            ret = -ECONNRESET;
    }
    goto out;
}
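
Given the contract documented above (preclear *_offset and *_abort; 1 means the reply is complete, -EAGAIN means more data must arrive first), a caller wanting a whole reply could look like the hypothetical sketch below. example_recv_reply is illustrative only, and a real caller would sleep on the call's notification between -EAGAIN retries rather than spin.

#include <net/af_rxrpc.h>

/* Hypothetical caller: gather a complete reply into buf. */
static int example_recv_reply(struct socket *sock, struct rxrpc_call *call,
			      void *buf, size_t size)
{
	size_t offset = 0;	/* running offset, precleared */
	u32 abort_code = 0;	/* precleared as documented */
	int ret;

	do {
		ret = rxrpc_kernel_recv_data(sock, call, buf, size,
					     &offset, false, &abort_code);
		/* on -EAGAIN: wait for the call's data notification here */
	} while (ret == -EAGAIN);

	return ret;	/* 1 = whole reply received, <0 = error/abort */
}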
Example #16
int
ksocknal_lib_recv_iov(struct ksock_conn *conn)
{
	unsigned int niov = conn->ksnc_rx_niov;
	struct kvec *iov = conn->ksnc_rx_iov;
	struct msghdr msg = {
		.msg_flags = 0
	};
	int nob;
	int i;
	int rc;
	int fragnob;
	int sum;
	__u32 saved_csum;

	LASSERT(niov > 0);

	for (nob = i = 0; i < niov; i++)
		nob += iov[i].iov_len;

	LASSERT(nob <= conn->ksnc_rx_nob_wanted);

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, niov, nob);
	rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);

	saved_csum = 0;
	if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
		saved_csum = conn->ksnc_msg.ksm_csum;
		conn->ksnc_msg.ksm_csum = 0;
	}

	if (saved_csum) {
		/* accumulate checksum */
		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
			LASSERT(i < niov);

			fragnob = iov[i].iov_len;
			if (fragnob > sum)
				fragnob = sum;

			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
							   iov[i].iov_base, fragnob);
		}
		conn->ksnc_msg.ksm_csum = saved_csum;
	}

	return rc;
}

int
ksocknal_lib_recv_kiov(struct ksock_conn *conn)
{
	unsigned int niov = conn->ksnc_rx_nkiov;
	lnet_kiov_t   *kiov = conn->ksnc_rx_kiov;
	struct msghdr msg = {
		.msg_flags = 0
	};
	int nob;
	int i;
	int rc;
	void *base;
	int sum;
	int fragnob;

	for (nob = i = 0; i < niov; i++)
		nob += kiov[i].bv_len;

	LASSERT(nob <= conn->ksnc_rx_nob_wanted);

	iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, kiov, niov, nob);
	rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);

	if (conn->ksnc_msg.ksm_csum) {
		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
			LASSERT(i < niov);

			base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
			fragnob = kiov[i].bv_len;
			if (fragnob > sum)
				fragnob = sum;

			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
							   base, fragnob);

			kunmap(kiov[i].bv_page);
		}
	}
	return rc;
}

void
ksocknal_lib_csum_tx(struct ksock_tx *tx)
{
	int i;
	__u32 csum;
	void *base;

	LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
	LASSERT(tx->tx_conn);
	LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

	tx->tx_msg.ksm_csum = 0;

	csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
			     tx->tx_iov[0].iov_len);

	if (tx->tx_kiov) {
		for (i = 0; i < tx->tx_nkiov; i++) {
			base = kmap(tx->tx_kiov[i].bv_page) +
			       tx->tx_kiov[i].bv_offset;

			csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len);

			kunmap(tx->tx_kiov[i].bv_page);
		}
	} else {
		for (i = 1; i < tx->tx_niov; i++)
			csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
					     tx->tx_iov[i].iov_len);
	}

	if (*ksocknal_tunables.ksnd_inject_csum_error) {
		csum++;
		*ksocknal_tunables.ksnd_inject_csum_error = 0;
	}

	tx->tx_msg.ksm_csum = csum;
}
Example #17
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}
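
Blocking signals around the send is the key addition over the earlier version of this function: a signal can no longer truncate an SMB mid-write, and -EINTR is reported only when the packet genuinely went out incomplete. The core of that idea as a hypothetical standalone sketch (example_send_unbroken is illustrative; sigfillset/sigprocmask and signal_pending are the same in-kernel primitives used above):

#include <linux/net.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>

/* Sketch: send without letting a signal produce a partial write. */
static int example_send_unbroken(struct socket *sock, struct msghdr *msg,
				 size_t len)
{
	sigset_t mask, oldmask;
	int rc;

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	rc = sock_sendmsg(sock, msg);

	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/* report -EINTR only if the send actually came up short */
	if (signal_pending(current) && rc >= 0 && (size_t)rc != len)
		rc = -EINTR;
	return rc;
}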

static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if there are not enough requests in
		 * flight to release the credits we need; waiting would
		 * likely leave us stuck.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}
Example #18
File: rxrpc.c Project: krzk/linux
/*
 * initiate a call
 */
long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
		   gfp_t gfp, bool async)
{
	struct sockaddr_rxrpc *srx = ac->addr;
	struct rxrpc_call *rxcall;
	struct msghdr msg;
	struct kvec iov[1];
	size_t offset;
	s64 tx_total_len;
	int ret;

	_enter(",{%pISp},", &srx->transport);

	ASSERT(call->type != NULL);
	ASSERT(call->type->name != NULL);

	_debug("____MAKE %p{%s,%x} [%d]____",
	       call, call->type->name, key_serial(call->key),
	       atomic_read(&call->net->nr_outstanding_calls));

	call->async = async;

	/* Work out the length we're going to transmit.  This is awkward for
	 * calls such as FS.StoreData where there's an extra injection of data
	 * after the initial fixed part.
	 */
	tx_total_len = call->request_size;
	if (call->send_pages) {
		if (call->last == call->first) {
			tx_total_len += call->last_to - call->first_offset;
		} else {
			/* It looks mathematically like you should be able to
			 * combine the following lines with the ones above, but
			 * unsigned arithmetic is fun when it wraps...
			 */
			tx_total_len += PAGE_SIZE - call->first_offset;
			tx_total_len += call->last_to;
			tx_total_len += (call->last - call->first - 1) * PAGE_SIZE;
		}
	}

	/* create a call */
	rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
					 (unsigned long)call,
					 tx_total_len, gfp,
					 (async ?
					  afs_wake_up_async_call :
					  afs_wake_up_call_waiter),
					 call->upgrade,
					 call->debug_id);
	if (IS_ERR(rxcall)) {
		ret = PTR_ERR(rxcall);
		goto error_kill_call;
	}

	call->rxcall = rxcall;

	/* send the request */
	iov[0].iov_base	= call->request;
	iov[0].iov_len	= call->request_size;

	msg.msg_name		= NULL;
	msg.msg_namelen		= 0;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
		      call->request_size);
	msg.msg_control		= NULL;
	msg.msg_controllen	= 0;
	msg.msg_flags		= MSG_WAITALL | (call->send_pages ? MSG_MORE : 0);

	ret = rxrpc_kernel_send_data(call->net->socket, rxcall,
				     &msg, call->request_size,
				     afs_notify_end_request_tx);
	if (ret < 0)
		goto error_do_abort;

	if (call->send_pages) {
		ret = afs_send_pages(call, &msg);
		if (ret < 0)
			goto error_do_abort;
	}

	/* at this point, an async call may no longer exist as it may have
	 * already completed */
	if (call->async)
		return -EINPROGRESS;

	return afs_wait_for_call_to_complete(call, ac);

error_do_abort:
	call->state = AFS_CALL_COMPLETE;
	if (ret != -ECONNABORTED) {
		rxrpc_kernel_abort_call(call->net->socket, rxcall,
					RX_USER_ABORT, ret, "KSD");
	} else {
		offset = 0;
		rxrpc_kernel_recv_data(call->net->socket, rxcall, NULL,
				       0, &offset, false, &call->abort_code,
				       &call->service_id);
		ac->abort_code = call->abort_code;
		ac->responded = true;
	}
	call->error = ret;
	trace_afs_call_done(call);
error_kill_call:
	afs_put_call(call);
	ac->error = ret;
	_leave(" = %d", ret);
	return ret;
}