Example 1: soaio_process_sb()
Runs on a kernel AIO worker thread and drains the socket buffer's queue of pending AIO jobs for as long as the socket stays ready; from FreeBSD's socket AIO code (sys/kern/sys_socket.c).
static void
soaio_process_sb(struct socket *so, struct sockbuf *sb)
{
	struct kaiocb *job;

	SOCKBUF_LOCK(sb);
	while (!TAILQ_EMPTY(&sb->sb_aiojobq) && soaio_ready(so, sb)) {
		job = TAILQ_FIRST(&sb->sb_aiojobq);
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		soaio_process_job(so, sb, job);
	}

	/*
	 * If there are still pending requests, the socket must not be
	 * ready, so set SB_AIO to request a wakeup when the socket
	 * becomes ready.
	 */
	if (!TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags |= SB_AIO;
	sb->sb_flags &= ~SB_AIO_RUNNING;
	SOCKBUF_UNLOCK(sb);

	/*
	 * Drop the socket reference taken when this task was scheduled
	 * (see the sowakeup_aio() sketch after Example 2).
	 */
	ACCEPT_LOCK();
	SOCK_LOCK(so);
	sorele(so);
}
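Both the loop above and the queueing path in Example 2 gate their work on soaio_ready(), which these examples omit. Its call sites treat it as a plain readiness test on the given socket buffer; a minimal sketch consistent with that usage, built on the standard soreadable()/sowriteable() macros:

static int
soaio_ready(struct socket *so, struct sockbuf *sb)
{

	/* Receive jobs wait for readable data; send jobs for buffer space. */
	return (sb == &so->so_rcv ? soreadable(so) : sowriteable(so));
}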
Example 2: soo_aio_queue()
The socket fileops entry point for a new kernel AIO request: the protocol gets first refusal via pru_aio_queue, otherwise the job is queued on the matching socket buffer.
static int
soo_aio_queue(struct file *fp, struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	int error;

	so = fp->f_data;

	/* Give the protocol a chance to queue the request itself. */
	error = (*so->so_proto->pr_usrreqs->pru_aio_queue)(so, job);
	if (error == 0)
		return (0);

	switch (job->uaiocb.aio_lio_opcode) {
	case LIO_READ:
		sb = &so->so_rcv;
		break;
	case LIO_WRITE:
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SOCKBUF_LOCK(sb);
	/* A freshly queued job cannot already have been cancelled. */
	if (!aio_set_cancel_function(job, soo_aio_cancel))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
	if (!(sb->sb_flags & SB_AIO_RUNNING)) {
		if (soaio_ready(so, sb))
			sowakeup_aio(so, sb);
		else
			sb->sb_flags |= SB_AIO;
	}
	SOCKBUF_UNLOCK(sb);
	return (0);
}
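The sowakeup_aio() call above is the other half of the SB_AIO_RUNNING handshake with Example 1: it claims the flag, takes a socket reference, and schedules the socket's AIO task on a worker thread, which soaio_process_sb() later undoes by clearing the flag and calling sorele(). A sketch of that wakeup side, close to the FreeBSD 11-era code (details such as the conditional SOCK_LOCK vary by version):

void
sowakeup_aio(struct socket *so, struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sb->sb_flags &= ~SB_AIO;
	if (sb->sb_flags & SB_AIO_RUNNING)
		return;
	/* Claim the worker; soaio_process_sb() clears this when it drains. */
	sb->sb_flags |= SB_AIO_RUNNING;
	if (sb == &so->so_snd)
		SOCK_LOCK(so);
	soref(so);			/* paired with the sorele() in Example 1 */
	if (sb == &so->so_snd)
		SOCK_UNLOCK(so);
	soaio_enqueue(&so->so_aiotask);	/* run soaio_process_sb() on a worker */
}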
Example 3: soaio_process_job()
Performs the actual non-blocking I/O for one dequeued job, retrying or requeueing at the head of the queue when the transfer races with ordinary reads and writes on the socket.
static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct file *fp;
	struct uio uio;
	struct iovec iov;
	size_t cnt;
	int error, flags;

	SOCKBUF_UNLOCK(sb);
	aio_switch_vmspace(job);
	td = curthread;
	fp = job->fd_file;
retry:
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	cnt = job->uaiocb.aio_nbytes;
	iov.iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf;
	iov.iov_len = cnt;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = cnt;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_td = td;
	flags = MSG_NBIO;

	/* TODO: Charge ru_msg* to job. */

	if (sb == &so->so_rcv) {
		uio.uio_rw = UIO_READ;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)
#endif
			error = soreceive(so, NULL, &uio, NULL, NULL, &flags);
	} else {
		uio.uio_rw = UIO_WRITE;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, &uio, NULL, NULL, flags, td);
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	cnt -= uio.uio_resid;
	td->td_ucred = td_savedcred;

	/* XXX: Not sure if this is needed? */
	if (cnt != 0 && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK))
		error = 0;
	if (error == EWOULDBLOCK) {
		/*
		 * A read() or write() on the socket raced with this
		 * request.  If the socket is now ready, try again.
		 * If it is not, place this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		SOCKBUF_LOCK(sb);
		empty_results++;
		if (soaio_ready(so, sb)) {
			empty_retries++;
			SOCKBUF_UNLOCK(sb);
			goto retry;
		}

		if (!aio_set_cancel_function(job, soo_aio_cancel)) {
			MPASS(cnt == 0);
			SOCKBUF_UNLOCK(sb);
			aio_cancel(job);
			SOCKBUF_LOCK(sb);
		} else {
			TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
		}
	} else {
		aio_complete(job, cnt, error);
		SOCKBUF_LOCK(sb);
	}
}
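The cancel routine installed here and in Example 2, soo_aio_cancel(), is not shown. It has to unlink a request that is still sitting on the queue and complete it as cancelled; a sketch along the lines of the same-era FreeBSD code:

static int
soo_aio_cancel(struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	int opcode;

	so = job->fd_file->f_data;
	opcode = job->uaiocb.aio_lio_opcode;
	if (opcode == LIO_READ)
		sb = &so->so_rcv;
	else {
		MPASS(opcode == LIO_WRITE);
		sb = &so->so_snd;
	}

	SOCKBUF_LOCK(sb);
	/*
	 * A worker may have already claimed the job via
	 * aio_clear_cancel_function(); only unlink it if not.
	 */
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
	if (TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags &= ~SB_AIO;
	SOCKBUF_UNLOCK(sb);

	aio_cancel(job);
	return (0);
}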
Example 4: soaio_process_job(), later revision
Extends Example 3 to record partial progress in job->aio_done, so a transfer interrupted by EWOULDBLOCK can resume where it left off, and to charge ru_msgsnd/ru_msgrcv once per completed request.
static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct file *fp;
	struct uio uio;
	struct iovec iov;
	size_t cnt, done;
	long ru_before;
	int error, flags;

	SOCKBUF_UNLOCK(sb);
	aio_switch_vmspace(job);
	td = curthread;
	fp = job->fd_file;
retry:
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	done = job->aio_done;
	cnt = job->uaiocb.aio_nbytes - done;
	iov.iov_base = (void *)((uintptr_t)job->uaiocb.aio_buf + done);
	iov.iov_len = cnt;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = cnt;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_td = td;
	flags = MSG_NBIO;

	/*
	 * For resource usage accounting, only count a completed request
	 * as a single message to avoid counting multiple calls to
	 * sosend/soreceive on a blocking socket.
	 */

	if (sb == &so->so_rcv) {
		uio.uio_rw = UIO_READ;
		ru_before = td->td_ru.ru_msgrcv;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)
#endif
			error = soreceive(so, NULL, &uio, NULL, NULL, &flags);
		if (td->td_ru.ru_msgrcv != ru_before)
			job->msgrcv = 1;
	} else {
		uio.uio_rw = UIO_WRITE;
		ru_before = td->td_ru.ru_msgsnd;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, &uio, NULL, NULL, flags, td);
		if (td->td_ru.ru_msgsnd != ru_before)
			job->msgsnd = 1;
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	done += cnt - uio.uio_resid;
	job->aio_done = done;
	td->td_ucred = td_savedcred;

	if (error == EWOULDBLOCK) {
		/*
		 * The request was either partially completed or not
		 * completed at all due to racing with a read() or
		 * write() on the socket.  If the socket is
		 * non-blocking, return with any partial completion.
		 * If the socket is blocking or if no progress has
		 * been made, requeue this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		MPASS(done != job->uaiocb.aio_nbytes);
		SOCKBUF_LOCK(sb);
		if (done == 0 || !(so->so_state & SS_NBIO)) {
			empty_results++;
			if (soaio_ready(so, sb)) {
				empty_retries++;
				SOCKBUF_UNLOCK(sb);
				goto retry;
			}

			if (!aio_set_cancel_function(job, soo_aio_cancel)) {
				SOCKBUF_UNLOCK(sb);
				if (done != 0)
					aio_complete(job, done, 0);
				else
					aio_cancel(job);
				SOCKBUF_LOCK(sb);
			} else {
				TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
			}
			return;
		}
		SOCKBUF_UNLOCK(sb);
	}
	if (done != 0 && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK))
		error = 0;
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, done, 0);
	SOCKBUF_LOCK(sb);
}
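All of the above sits behind the ordinary POSIX AIO interface: an aio_read() or aio_write() on a socket descriptor reaches soo_aio_queue() (Example 2), and a kernel worker completes the job through soaio_process_job(). A minimal userspace sketch of that usage; sockfd is assumed to be a connected socket, and error handling is trimmed:

#include <sys/types.h>
#include <aio.h>
#include <errno.h>
#include <string.h>

/* Post an asynchronous read on a connected socket and wait for it. */
static ssize_t
socket_aio_read(int sockfd, void *buf, size_t len)
{
	struct aiocb cb;
	const struct aiocb *list[1];

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = sockfd;		/* a socket fd routes to soo_aio_queue() */
	cb.aio_buf = buf;
	cb.aio_nbytes = len;

	if (aio_read(&cb) != 0)
		return (-1);

	/* Sleep until the kernel AIO worker finishes the job. */
	list[0] = &cb;
	while (aio_error(&cb) == EINPROGRESS)
		(void)aio_suspend(list, 1, NULL);

	return (aio_return(&cb));
}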