Example #1
static int pep_wait_connreq(struct sock *sk, int noblock)
{
	struct task_struct *tsk = current;
	struct pep_sock *pn = pep_sk(sk);
	long timeo = sock_rcvtimeo(sk, noblock);

	for (;;) {
		DEFINE_WAIT(wait);

		if (sk->sk_state != TCP_LISTEN)
			return -EINVAL;
		if (!hlist_empty(&pn->ackq))
			break;
		if (!timeo)
			return -EWOULDBLOCK;
		if (signal_pending(tsk))
			return sock_intr_errno(timeo);

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	return 0;
}
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;

		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else {
				if (!skb->next || IS_ERR(skb->next)) {
					printk("[NET] skb->next error in %s\n", __func__);
					error = -EAGAIN;
					spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
							       cpu_flags);
					goto no_packet;
				}
				__skb_unlink(skb, &sk->sk_receive_queue);
			}
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
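
The peek branch above (skb->peeked, atomic_inc(&skb->users)) is what gives recv(2) its MSG_PEEK semantics: a peeked datagram stays on the receive queue and can be read again. A minimal user-space sketch of that behaviour, assuming a loopback UDP pair and with error handling trimmed:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int rx = socket(AF_INET, SOCK_DGRAM, 0);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET };
	socklen_t alen = sizeof(a);
	char buf[64];

	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(rx, (struct sockaddr *)&a, sizeof(a));
	getsockname(rx, (struct sockaddr *)&a, &alen);	/* learn the port */
	sendto(tx, "hello", 5, 0, (struct sockaddr *)&a, sizeof(a));

	/* The first read only peeks: the datagram is not unlinked. */
	ssize_t n = recv(rx, buf, sizeof(buf), MSG_PEEK);
	printf("peeked %zd bytes\n", n);

	/* The second read consumes the same datagram. */
	n = recv(rx, buf, sizeof(buf), 0);
	printf("read   %zd bytes\n", n);

	close(tx);
	close(rx);
	return 0;
}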
Example #3
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;
		int _off = *off;

		last = (struct sk_buff *)queue;
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			last = skb;
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				if (_off >= skb->len && (skb->len || _off ||
							 skb->peeked)) {
					_off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			*off = _off;
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		if (sk_can_busy_loop(sk) &&
		    sk_busy_loop(sk, flags & MSG_DONTWAIT))
			continue;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_more_packets(sk, err, &timeo, last));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
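
The *off argument threaded through the queue walk above is the kernel half of the Linux-specific SO_PEEK_OFF socket option: successive MSG_PEEK reads walk forward through queued data without consuming it. A sketch of the user-visible behaviour on an AF_UNIX stream pair, per socket(7), with error handling trimmed:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2];
	char buf[5];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send(sv[0], "aaaabbbbcccc", 12, 0);

	int off = 0;
	setsockopt(sv[1], SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));

	/* Each peek advances the kernel's peek offset by the bytes peeked. */
	for (int i = 0; i < 3; i++) {
		ssize_t n = recv(sv[1], buf, 4, MSG_PEEK);
		buf[n > 0 ? n : 0] = '\0';
		printf("peek %d: %s\n", i, buf);
	}

	/* A normal read still starts at the front of the queue. */
	ssize_t n = recv(sv[1], buf, 4, 0);
	buf[n > 0 ? n : 0] = '\0';
	printf("read  : %s\n", buf);

	close(sv[0]);
	close(sv[1]);
	return 0;
}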
Example #4
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb;
	long timeo;
	int error = 0;

	if ((!sk) || (IS_ERR(sk)))
		goto no_packet;

	error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Only user-level code calls this function, so nothing at
		 * interrupt level will suddenly eat the receive_queue.
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;

		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
#ifdef CONFIG_HTC_NETWORK_MODIFY
				if (*off >= skb->len && skb->len) {
#else
				if (*off >= skb->len && skb->len) {
#endif
					*off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		
		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
Example #5
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
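
The loop above bounds the in-kernel sleep with the socket's receive timeout. The user-space counterpart puts the timeout into poll(2) instead; a sketch, where accept_timeo is an illustrative helper and lfd is assumed to be a listening socket in O_NONBLOCK mode:

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

int accept_timeo(int lfd, int timeout_ms)
{
	struct pollfd pfd = { .fd = lfd, .events = POLLIN };

	for (;;) {
		int r = poll(&pfd, 1, timeout_ms);
		if (r == 0)
			return -EAGAIN;		/* timed out, cf. !timeo above */
		if (r < 0) {
			if (errno == EINTR)
				return -EINTR;	/* cf. sock_intr_errno() */
			return -errno;
		}
		/* Readable listener: a connection should be queued. */
		int fd = accept(lfd, NULL, NULL);
		if (fd >= 0 || errno != EAGAIN)
			return fd >= 0 ? fd : -errno;
		/* Lost a race with another accepter; wait again. */
	}
}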
Example #6
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
			     int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, L2CAP_NESTING_PARENT);

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (1) {
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		nsk = bt_accept_dequeue(sk, newsock);
		if (nsk)
			break;

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);

		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);

		lock_sock_nested(sk, L2CAP_NESTING_PARENT);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
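
When timeo is zero the loop above fails immediately with -EAGAIN instead of sleeping in wait_woken(). That is exactly what user space sees from accept(2) on a nonblocking listener; a minimal sketch with an ephemeral loopback port, error handling trimmed:

#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int lfd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET,
				 .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };

	bind(lfd, (struct sockaddr *)&a, sizeof(a));
	listen(lfd, 8);
	fcntl(lfd, F_SETFL, fcntl(lfd, F_GETFL) | O_NONBLOCK);

	/* No pending connection: accept fails instead of sleeping. */
	if (accept(lfd, NULL, NULL) < 0 &&
	    (errno == EAGAIN || errno == EWOULDBLOCK))
		printf("no connection queued (EAGAIN)\n");

	close(lfd);
	return 0;
}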
/**
 *	skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@noblock: blocking operation?
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, noblock);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		if (flags & MSG_PEEK) {
			unsigned long cpu_flags;

			spin_lock_irqsave(&sk->sk_receive_queue.lock,
					  cpu_flags);
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				atomic_inc(&skb->users);
			spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
					       cpu_flags);
		} else
			skb = skb_dequeue(&sk->sk_receive_queue);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
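
sock_rcvtimeo() returns either zero (nonblocking) or the value configured with SO_RCVTIMEO, which bounds how long the wait_for_packet() loop may sleep. A user-space sketch, assuming nothing is ever sent to the socket so the read times out:

#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
	char buf[16];

	/* Bound the blocking wait in the receive path to one second. */
	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));

	if (recv(fd, buf, sizeof(buf), 0) < 0 &&
	    (errno == EAGAIN || errno == EWOULDBLOCK))
		printf("timed out: the kernel wait loop gave up\n");

	close(fd);
	return 0;
}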
Example #8
/**
 *	llc_ui_recvmsg - copy received data to the socket user.
 *	@sock: Socket to copy data from.
 *	@msg: Various user space related information.
 *	@size: Size of user buffer.
 *	@flags: User specified flags.
 *
 *	Copy received data to the socket user.
 *	Returns non-negative upon success, negative otherwise.
 */
static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name;
	struct sk_buff *skb;
	size_t copied = 0;
	int rc = -ENOMEM, timeout;
	int noblock = flags & MSG_DONTWAIT;

	dprintk("%s: receiving in %02X from %02X\n", __FUNCTION__,
		llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap);
	lock_sock(sk);
	timeout = sock_rcvtimeo(sk, noblock);
	rc = llc_ui_wait_for_data(sk, timeout);
	if (rc) {
		dprintk("%s: llc_ui_wait_for_data failed recv "
			"in %02X from %02X\n", __FUNCTION__,
			llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap);
		goto out;
	}
	skb = skb_dequeue(&sk->sk_receive_queue);
	if (!skb) /* shutdown */
		goto out;
	copied = skb->len;
	if (copied > size)
		copied = size;
	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (rc)
		goto dgram_free;
	if (skb->len > copied) {
		skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);
	}
	if (uaddr)
		memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
	msg->msg_namelen = sizeof(*uaddr);
	if (!skb->list) {
dgram_free:
		kfree_skb(skb);
	}
out:
	release_sock(sk);
	return rc ? : copied;
}
Example #9
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, flags, destructor, peeked,
					      off, err, &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		!__skb_wait_for_more_packets(sk, err, &timeo, last));

	return NULL;
}
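
The split into __skb_try_recv_datagram() plus __skb_wait_for_more_packets() has the same shape as the classic user-space loop over a nonblocking descriptor: try, then wait for readability, then retry. A sketch under that analogy; recv_wait is an illustrative helper, not a library call:

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/types.h>

ssize_t recv_wait(int fd, void *buf, size_t len, int timeout_ms)
{
	for (;;) {
		/* "try": never sleeps, like __skb_try_recv_datagram() */
		ssize_t n = recv(fd, buf, len, MSG_DONTWAIT);
		if (n >= 0 || (errno != EAGAIN && errno != EWOULDBLOCK))
			return n;

		/* "wait": like __skb_wait_for_more_packets() */
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		int r = poll(&pfd, 1, timeout_ms);
		if (r == 0) {
			errno = EAGAIN;		/* timed out */
			return -1;
		}
		if (r < 0 && errno != EINTR)
			return -1;
	}
}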
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	/* For a blocking socket this picks up the configured receive timeout */
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		/*
		 * Take the queue lock while looking for a packet, so that no
		 * other thread can steal it from under us.
		 */
		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue); /* anything queued on this socket? */
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				/*
				 * MSG_PEEK means the caller only wants to
				 * peek at the data, not consume it, so the
				 * packet is left on the queue.
				 */
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, &sk->sk_receive_queue); /* dequeue: the data is consumed */
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb) /* a packet was found, hand it back */
			return skb;

		/*
		 * timeo is zero in two cases: the socket is non-blocking, or
		 * a blocking socket has already waited out its timeout.
		 * Either way the user doesn't want to wait any longer, so
		 * bail out via no_packet.
		 */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo)); /* sleep until a packet arrives */

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
Example #11
static int
vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		     int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	int err;
	size_t target;
	ssize_t copied;
	long timeout;
	struct vsock_transport_recv_notify_data recv_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown. Differentiate between that case and when a
		 * peer has not connected or a local shutdown occurred with the
		 * SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check peer_shutdown flag here since peer may actually shut
	 * down, but there can be data in the queue that a local socket can
	 * receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer.  This
	 * is not an error.  We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing.  Note that this
	 * makes it impossible to handle cases where target is greater than the
	 * queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;


	while (1) {
		s64 ready;

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		ready = vsock_stream_has_data(vsk);

		if (ready == 0) {
			if (sk->sk_err != 0 ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}

			err = transport->notify_recv_pre_block(
					vsk, target, &recv_data);
			if (err < 0) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				finish_wait(sk_sleep(sk), &wait);
				break;
			} else if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
		} else {
			ssize_t read;

			finish_wait(sk_sleep(sk), &wait);

			if (ready < 0) {
				/* Invalid queue pair content. XXX This should
				* be changed to a connection reset in a later
				* change.
				*/

				err = -ENOMEM;
				goto out;
			}

			err = transport->notify_recv_pre_dequeue(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			read = transport->stream_dequeue(
					vsk, msg,
					len - copied, flags);
			if (read < 0) {
				err = -ENOMEM;
				break;
			}

			copied += read;

			err = transport->notify_recv_post_dequeue(
					vsk, target, read,
					!(flags & MSG_PEEK), &recv_data);
			if (err < 0)
				goto out;

			if (read >= target || flags & MSG_PEEK)
				break;

			target -= read;
		}
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0)
		err = copied;

out:
	release_sock(sk);
	return err;
}
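
The target computed by sock_rcvlowat() is what makes MSG_WAITALL block until the requested amount has arrived rather than returning the first chunk. A user-space sketch with a forked sender over an AF_UNIX pair, error handling trimmed:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	int sv[2];
	char buf[8];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	if (fork() == 0) {
		/* Child: deliver the payload in two pieces. */
		send(sv[0], "half", 4, 0);
		usleep(100 * 1000);
		send(sv[0], "half", 4, 0);
		_exit(0);
	}

	/* MSG_WAITALL keeps us waiting until all 8 bytes are in,
	 * instead of returning the first 4-byte chunk. */
	ssize_t n = recv(sv[1], buf, 8, MSG_WAITALL);
	printf("got %zd bytes\n", n);
	return 0;
}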
Example #12
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
		int len, int flags)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	struct task_struct *tsk = current;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;
	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		if (sk->sk_state != TCP_CLOSE) {
			err = -EISCONN;
			goto out;
		}
		break;
	case SS_CONNECTING:
		err = -EALREADY;
		goto out;
	default:
		err = -EISCONN;
		goto out;
	}

	pn->dobject = pn_sockaddr_get_object(spn);
	pn->resource = pn_sockaddr_get_resource(spn);
	sock->state = SS_CONNECTING;

	err = sk->sk_prot->connect(sk, addr, len);
	if (err) {
		sock->state = SS_UNCONNECTED;
		pn->dobject = 0;
		goto out;
	}

	while (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EINPROGRESS;
			goto out;
		}
		if (signal_pending(tsk)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
		err = 0;
	else if (sk->sk_state == TCP_CLOSE_WAIT)
		err = -ECONNRESET;
	else
		err = -ECONNREFUSED;
	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
	release_sock(sk);
	return err;
}
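
With a zero timeout the loop above returns -EINPROGRESS, leaving user space to finish the handshake wait itself: poll for writability, then read the pending error with SO_ERROR. A sketch of that canonical pattern; finish_connect is an illustrative name, and fd is assumed to be nonblocking:

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

int finish_connect(int fd, const struct sockaddr *addr, socklen_t alen,
		   int timeout_ms)
{
	if (connect(fd, addr, alen) == 0)
		return 0;			/* connected immediately */
	if (errno != EINPROGRESS)
		return -errno;

	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -ETIMEDOUT;

	/* The pending error distinguishes ESTABLISHED from e.g. refused. */
	int err = 0;
	socklen_t len = sizeof(err);
	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
	return -err;
}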
Example #13
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, false);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, false);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}
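
dccp_recvmsg() reports oversized packets the way UDP does: MSG_TRUNC is set in the returned flags, and, Linux-specific, passing MSG_TRUNC in the input flags makes recv() return the datagram's true length. A sketch over loopback UDP, error handling trimmed:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int rx = socket(AF_INET, SOCK_DGRAM, 0);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET };
	socklen_t alen = sizeof(a);
	char small[4];

	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(rx, (struct sockaddr *)&a, sizeof(a));
	getsockname(rx, (struct sockaddr *)&a, &alen);
	sendto(tx, "twelve bytes", 12, 0, (struct sockaddr *)&a, sizeof(a));

	/* Only 4 bytes are copied, but the real length comes back. */
	ssize_t n = recv(rx, small, sizeof(small), MSG_TRUNC);
	printf("datagram was %zd bytes (buffer held %zu)\n",
	       n, sizeof(small));

	close(tx);
	close(rx);
	return 0;
}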
Example #14
/*
 * receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		/* return immediately if a client socket has no outstanding
		 * calls */
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		/* get the next message on the Rx queue */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			/* nothing remains on the queue */
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			/* wait for a message to turn up */
			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		/* make sure we wait for the state to be updated in this call */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		/* determine whether to continue last data receive */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name) {
				size_t len =
					sizeof(call->conn->trans->peer->srx);
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx, len);
				msg->msg_namelen = len;
			}
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		/* receive the message */
		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			/* only set the control data once per recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);

		if (ret < 0)
			goto copy_error;

		/* handle piecemeal consumption of data packets */
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				 /* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		/* move on to the next data message */
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}
Example #15
static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */

	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	/* will be updated in set_orig_addr() if needed */
	m->msg_namelen = 0;

	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */

	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Complete connection setup for an implied connect */

	if (unlikely(sock->state == SS_CONNECTING)) {
		res = auto_connect(sock, msg);
		if (res)
			goto exit;
	}

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */

	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */

	res = anc_data_recv(m, msg, tport);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
					      m->msg_iov, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		if ((sock->state != SS_READY) &&
		    (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}
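
recv_msg() signals truncation by setting MSG_TRUNC in m->msg_flags; user space only sees that bit through recvmsg(2), not plain recv(2). A sketch over loopback UDP, error handling trimmed:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int rx = socket(AF_INET, SOCK_DGRAM, 0);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET };
	socklen_t alen = sizeof(a);
	char small[4];
	struct iovec iov = { .iov_base = small, .iov_len = sizeof(small) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(rx, (struct sockaddr *)&a, sizeof(a));
	getsockname(rx, (struct sockaddr *)&a, &alen);
	sendto(tx, "oversized", 9, 0, (struct sockaddr *)&a, sizeof(a));

	recvmsg(rx, &msg, 0);
	if (msg.msg_flags & MSG_TRUNC)
		printf("datagram was truncated to %zu bytes\n", sizeof(small));

	close(tx);
	close(rx);
	return 0;
}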
Example #16
static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */

	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely((sock->state == SS_UNCONNECTED) ||
		     (sock->state == SS_CONNECTING))) {
		res = -ENOTCONN;
		goto exit;
	}

	/* will be updated in set_orig_addr() if needed */
	m->msg_namelen = 0;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */

	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */

	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = anc_data_recv(m, msg, tport);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
					      m->msg_iov, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}

	/* Loop around if more data is required */

	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
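
The restart loop keeps copying until sz_copied reaches the target. User space needs the same loop whenever it cannot rely on MSG_WAITALL; recv_exactly below is an illustrative helper sketch:

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Read exactly 'len' bytes from a stream socket, looping over short
 * reads.  Returns 0 on success, -1 on error or EOF before 'len' bytes. */
int recv_exactly(int fd, void *buf, size_t len)
{
	char *p = buf;

	while (len > 0) {
		ssize_t n = recv(fd, p, len, 0);
		if (n == 0)
			return -1;		/* peer closed early */
		if (n < 0) {
			if (errno == EINTR)
				continue;	/* retry after a signal */
			return -1;
		}
		p += n;
		len -= (size_t)n;
	}
	return 0;
}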
Example #17
/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                  int flags)
{
    struct rxrpc_call *call;
    struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
    struct list_head *l;
    size_t copied = 0;
    long timeo;
    int ret;

    DEFINE_WAIT(wait);

    trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);

    if (flags & (MSG_OOB | MSG_TRUNC))
        return -EOPNOTSUPP;

    timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
    lock_sock(&rx->sk);

    /* Return immediately if a client socket has no outstanding calls */
    if (RB_EMPTY_ROOT(&rx->calls) &&
            list_empty(&rx->recvmsg_q) &&
            rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
        release_sock(&rx->sk);
        return -ENODATA;
    }

    if (list_empty(&rx->recvmsg_q)) {
        ret = -EWOULDBLOCK;
        if (timeo == 0) {
            call = NULL;
            goto error_no_call;
        }

        release_sock(&rx->sk);

        /* Wait for something to happen */
        prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
                                  TASK_INTERRUPTIBLE);
        ret = sock_error(&rx->sk);
        if (ret)
            goto wait_error;

        if (list_empty(&rx->recvmsg_q)) {
            if (signal_pending(current))
                goto wait_interrupted;
            trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
                                0, 0, 0, 0);
            timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(&rx->sk), &wait);
        goto try_again;
    }

    /* Find the next call and dequeue it if we're not just peeking.  If we
     * do dequeue it, that comes with a ref that we will need to release.
     */
    write_lock_bh(&rx->recvmsg_lock);
    l = rx->recvmsg_q.next;
    call = list_entry(l, struct rxrpc_call, recvmsg_link);
    if (!(flags & MSG_PEEK))
        list_del_init(&call->recvmsg_link);
    else
        rxrpc_get_call(call, rxrpc_call_got);
    write_unlock_bh(&rx->recvmsg_lock);

    trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);

    if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
        BUG();

    if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
        if (flags & MSG_CMSG_COMPAT) {
            unsigned int id32 = call->user_call_ID;

            ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                           sizeof(unsigned int), &id32);
        } else {
            ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                           sizeof(unsigned long),
                           &call->user_call_ID);
        }
        if (ret < 0)
            goto error;
    }

    if (msg->msg_name) {
        size_t len = sizeof(call->conn->params.peer->srx);
        memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
        msg->msg_namelen = len;
    }

    switch (call->state) {
    case RXRPC_CALL_SERVER_ACCEPTING:
        ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
        break;
    case RXRPC_CALL_CLIENT_RECV_REPLY:
    case RXRPC_CALL_SERVER_RECV_REQUEST:
    case RXRPC_CALL_SERVER_ACK_REQUEST:
        ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
                                 flags, &copied);
        if (ret == -EAGAIN)
            ret = 0;

        if (after(call->rx_top, call->rx_hard_ack) &&
                call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
            rxrpc_notify_socket(call);
        break;
    default:
        ret = 0;
        break;
    }

    if (ret < 0)
        goto error;

    if (call->state == RXRPC_CALL_COMPLETE) {
        ret = rxrpc_recvmsg_term(call, msg);
        if (ret < 0)
            goto error;
        if (!(flags & MSG_PEEK))
            rxrpc_release_call(rx, call);
        msg->msg_flags |= MSG_EOR;
        ret = 1;
    }

    if (ret == 0)
        msg->msg_flags |= MSG_MORE;
    else
        msg->msg_flags &= ~MSG_MORE;
    ret = copied;

error:
    rxrpc_put_call(call, rxrpc_call_put);
error_no_call:
    release_sock(&rx->sk);
    trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
    return ret;

wait_interrupted:
    ret = sock_intr_errno(timeo);
wait_error:
    finish_wait(sk_sleep(&rx->sk), &wait);
    call = NULL;
    goto error_no_call;
}
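
put_cmsg() is the kernel side of recvmsg(2) ancillary data: user space retrieves RXRPC_USER_CALL_ID, timestamps, and similar items by walking the control buffer with the CMSG macros. A sketch using the generic, Linux-specific SO_TIMESTAMP option on loopback UDP, error handling trimmed:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int rx = socket(AF_INET, SOCK_DGRAM, 0);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	struct sockaddr_in a = { .sin_family = AF_INET };
	socklen_t alen = sizeof(a);
	char data[16], ctrl[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = ctrl,
			      .msg_controllen = sizeof(ctrl) };

	setsockopt(rx, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(rx, (struct sockaddr *)&a, sizeof(a));
	getsockname(rx, (struct sockaddr *)&a, &alen);
	sendto(tx, "ping", 4, 0, (struct sockaddr *)&a, sizeof(a));

	recvmsg(rx, &msg, 0);
	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
	     c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level == SOL_SOCKET &&
		    c->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(c), sizeof(tv));
			printf("received at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
	close(tx);
	close(rx);
	return 0;
}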
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
		  struct msghdr *msg, size_t len, int flags)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_call *call = NULL, *continue_call = NULL;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	long timeo;
	int copy, ret, ullen, offset, copied = 0;
	u32 abort_code;

	DEFINE_WAIT(wait);

	_enter(",,,%zu,%d", len, flags);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
	msg->msg_flags |= MSG_MORE;

	lock_sock(&rx->sk);

	for (;;) {
		if (RB_EMPTY_ROOT(&rx->calls)) {
			if (copied)
				goto out;
			if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
				release_sock(&rx->sk);
				if (continue_call)
					rxrpc_put_call(continue_call);
				return -ENODATA;
			}
		}

		
		/* get the next message on the Rx queue */
		skb = skb_peek(&rx->sk.sk_receive_queue);
		if (!skb) {
			
			/* nothing remains on the queue */
			if (copied &&
			    (msg->msg_flags & MSG_PEEK || timeo == 0))
				goto out;

			
			/* wait for a message to turn up */
			release_sock(&rx->sk);
			prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
						  TASK_INTERRUPTIBLE);
			ret = sock_error(&rx->sk);
			if (ret)
				goto wait_error;

			if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
				if (signal_pending(current))
					goto wait_interrupted;
				timeo = schedule_timeout(timeo);
			}
			finish_wait(sk_sleep(&rx->sk), &wait);
			lock_sock(&rx->sk);
			continue;
		}

	peek_next_packet:
		sp = rxrpc_skb(skb);
		call = sp->call;
		ASSERT(call != NULL);

		_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);

		
		/* make sure we wait for the state to be updated in this call */
		spin_lock_bh(&call->lock);
		spin_unlock_bh(&call->lock);

		if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
			_debug("packet from released call");
			if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
				BUG();
			rxrpc_free_skb(skb);
			continue;
		}

		
		/* determine whether to continue last data receive */
		if (continue_call) {
			_debug("maybe cont");
			if (call != continue_call ||
			    skb->mark != RXRPC_SKB_MARK_DATA) {
				release_sock(&rx->sk);
				rxrpc_put_call(continue_call);
				_leave(" = %d [noncont]", copied);
				return copied;
			}
		}

		rxrpc_get_call(call);

		
		/* copy the peer address and timestamp */
		if (!continue_call) {
			if (msg->msg_name && msg->msg_namelen > 0)
				memcpy(msg->msg_name,
				       &call->conn->trans->peer->srx,
				       sizeof(call->conn->trans->peer->srx));
			sock_recv_ts_and_drops(msg, &rx->sk, skb);
		}

		
		/* receive the message */
		if (skb->mark != RXRPC_SKB_MARK_DATA)
			goto receive_non_data_message;

		_debug("recvmsg DATA #%u { %d, %d }",
		       ntohl(sp->hdr.seq), skb->len, sp->offset);

		if (!continue_call) {
			
			/* only set the control data once per recvmsg() */
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				       ullen, &call->user_call_ID);
			if (ret < 0)
				goto copy_error;
			ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		}

		ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
		ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
		call->rx_data_recv = ntohl(sp->hdr.seq);

		ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);

		offset = sp->offset;
		copy = skb->len - offset;
		if (copy > len - copied)
			copy = len - copied;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			ret = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, copy);
		} else {
			ret = skb_copy_and_csum_datagram_iovec(skb, offset,
							       msg->msg_iov);
			if (ret == -EINVAL)
				goto csum_copy_error;
		}

		if (ret < 0)
			goto copy_error;

		
		_debug("copied %d+%d", copy, copied);

		offset += copy;
		copied += copy;

		if (!(flags & MSG_PEEK))
			sp->offset = offset;

		if (sp->offset < skb->len) {
			_debug("buffer full");
			ASSERTCMP(copied, ==, len);
			break;
		}

		
		/* we transferred the whole data packet */
		if (sp->hdr.flags & RXRPC_LAST_PACKET) {
			_debug("last");
			if (call->conn->out_clientflag) {
				 
				/* last byte of reply received */
				ret = copied;
				goto terminal_message;
			}

			
			/* last bit of request received */
			if (!(flags & MSG_PEEK)) {
				_debug("eat packet");
				if (skb_dequeue(&rx->sk.sk_receive_queue) !=
				    skb)
					BUG();
				rxrpc_free_skb(skb);
			}
			msg->msg_flags &= ~MSG_MORE;
			break;
		}

		
		_debug("next");
		if (!continue_call)
			continue_call = sp->call;
		else
			rxrpc_put_call(call);
		call = NULL;

		if (flags & MSG_PEEK) {
			_debug("peek next");
			skb = skb->next;
			if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
				break;
			goto peek_next_packet;
		}

		_debug("eat packet");
		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
			BUG();
		rxrpc_free_skb(skb);
	}