Example #1
/*
 * check a range of space and convert unwritten extents to written.
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	if (list_empty(&io->list))
		return ret;

	if (!(io->flag & EXT4_IO_END_UNWRITTEN))
		return ret;

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		printk(KERN_EMERG "%s: failed to convert unwritten "
			"extents to written extents, error is %d "
			"io is still on inode %lu aio dio list\n",
		       __func__, ret, inode->i_ino);
		return ret;
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);
	/* clear the DIO AIO unwritten flag */
	io->flag &= ~EXT4_IO_END_UNWRITTEN;
	return ret;
}
Example #2
/*
 * check a range of space and convert unwritten extents to written.
 *
 * Called with inode->i_mutex; we depend on this when we manipulate
 * io->flag, since we could otherwise race with ext4_flush_completed_IO()
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}

	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
		wake_up_all(ext4_ioend_wq(io->inode));
	if (io->flag & EXT4_IO_END_DIRECT)
		inode_dio_done(inode);
	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);
	return ret;
}
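The header comment in this version notes that the caller must hold inode->i_mutex, since ext4_end_io_nolock() would otherwise race with ext4_flush_completed_IO() on io->flag. For context, below is a minimal sketch of what such a drain loop on the caller side might look like; the i_completed_io_list and i_completed_io_lock field names and the helper's signature are assumptions for illustration, not necessarily the exact ext4 layout.

/*
 * Minimal sketch of a completed-IO drain loop (illustrative only).
 * Assumes a per-inode list of ext4_io_end_t protected by a spinlock;
 * the i_completed_io_* field names below are hypothetical.
 */
static int ext4_flush_completed_io_sketch(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_io_end_t *io;
	int err, ret = 0;

	/* i_mutex is assumed to be held by the caller, per the comment above */
	spin_lock_irq(&ei->i_completed_io_lock);
	while (!list_empty(&ei->i_completed_io_list)) {
		io = list_first_entry(&ei->i_completed_io_list,
				      ext4_io_end_t, list);
		list_del_init(&io->list);
		spin_unlock_irq(&ei->i_completed_io_lock);

		err = ext4_end_io_nolock(io);
		if (err && !ret)
			ret = err;

		spin_lock_irq(&ei->i_completed_io_lock);
	}
	spin_unlock_irq(&ei->i_completed_io_lock);
	return ret;
}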
Example #3
static void
soo_aio_cancel(struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	long done;
	int opcode;

	so = job->fd_file->f_data;
	opcode = job->uaiocb.aio_lio_opcode;
	if (opcode == LIO_READ)
		sb = &so->so_rcv;
	else {
		MPASS(opcode == LIO_WRITE);
		sb = &so->so_snd;
	}

	SOCKBUF_LOCK(sb);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
	if (TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags &= ~SB_AIO;
	SOCKBUF_UNLOCK(sb);

	done = job->aio_done;
	if (done != 0)
		aio_complete(job, done, 0);
	else
		aio_cancel(job);
}
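soo_aio_cancel() tears down a queued socket AIO request: it unlinks the job from the socket buffer's sb_aiojobq, clears SB_AIO once the queue is empty, and then reports partial progress via aio_complete() or cancels outright. For context, a minimal sketch of the queueing side it mirrors is shown below; the function name is hypothetical, and only the list and flag manipulation is inferred from the cancel path above.

/*
 * Illustrative sketch of queueing a socket AIO job (hypothetical helper).
 * Sets up exactly the state that soo_aio_cancel() tears down: the job is
 * placed on sb->sb_aiojobq with soo_aio_cancel registered as its cancel
 * routine, and SB_AIO marks the socket buffer as having pending AIO.
 */
static void
soaio_enqueue_sketch(struct sockbuf *sb, struct kaiocb *job)
{
	SOCKBUF_LOCK(sb);
	if (!aio_set_cancel_function(job, soo_aio_cancel)) {
		/* The request was already cancelled before it could be queued. */
		SOCKBUF_UNLOCK(sb);
		aio_cancel(job);
		return;
	}
	TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
	sb->sb_flags |= SB_AIO;
	SOCKBUF_UNLOCK(sb);
}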
Example #4
int async_poll(struct kiocb *iocb, int events)
{
	unsigned int mask;
	async_poll_table *pasync;
	poll_table *p;

	/* Fast path */
	if (iocb->filp->f_op && iocb->filp->f_op->poll) {
		mask = iocb->filp->f_op->poll(iocb->filp, NULL);
		mask &= events | POLLERR | POLLHUP;
		if (mask & events)
			return mask;
	}

	pasync = kmem_cache_alloc(async_poll_table_cache, SLAB_KERNEL);
	if (!pasync)
		return -ENOMEM;

	p = (poll_table *)pasync;
	poll_initwait(p);
	wtd_set_action(&pasync->wtd, async_poll_complete, pasync);
	p->iocb = iocb;
	pasync->wake = 0;
	pasync->sync = 0;
	pasync->events = events;
	pasync->pt_page.entry = pasync->pt_page.entries;
	pasync->pt_page.size = sizeof(pasync->pt_page);
	p->table = &pasync->pt_page;

	iocb->data = p;
	iocb->users++;
	wmb();

	mask = DEFAULT_POLLMASK;
	if (iocb->filp->f_op && iocb->filp->f_op->poll)
		mask = iocb->filp->f_op->poll(iocb->filp, p);
	mask &= events | POLLERR | POLLHUP;
	if (mask && xchg(&iocb->data, NULL)) {
		poll_freewait(p);
		aio_complete(iocb, mask, 0);
	}

	iocb->cancel = async_poll_cancel;
	aio_put_req(iocb);
	return 0;
}
Example #5
static void async_poll_waiter(wait_queue_t *wait)
{
	struct poll_table_entry *entry = (struct poll_table_entry *)wait;
	async_poll_table *pasync = (async_poll_table *)(entry->p);
	struct kiocb *iocb = pasync->pt.iocb;
	unsigned int mask;

	mask = iocb->filp->f_op->poll(iocb->filp, NULL);
	mask &= pasync->events | POLLERR | POLLHUP;
	if (mask) {
		poll_table *p2 = xchg(&iocb->data, NULL);
		if (p2) {
			__poll_freewait(p2, wait); 
			aio_complete(iocb, mask, 0);
		}
		return;
	}
}
Example #6
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						 ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	error = xfs_setfilesize(ioend);
	ASSERT(!error || error == EAGAIN);

	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later. Otherwise destroy
	 * it.
	 */
	if (error == EAGAIN) {
		atomic_inc(&ioend->io_remaining);
		xfs_finish_ioend(ioend);
		/* ensure we don't spin on blocked ioends */
		delay(1);
	} else {
		if (ioend->io_iocb)
			aio_complete(ioend->io_iocb, ioend->io_result, 0);
		xfs_destroy_ioend(ioend);
	}
}
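When the size update returns EAGAIN, xfs_end_io() re-queues itself by bumping io_remaining and calling xfs_finish_ioend(). A minimal sketch of what such a queueing helper could look like follows; treating io_remaining as a reference count and the workqueue variable are assumptions inferred from the usage above, not the exact XFS implementation.

/*
 * Illustrative sketch of the queueing side of this completion path.
 * Assumes io_remaining counts outstanding references to the ioend and
 * that a dedicated workqueue (hypothetical name) runs xfs_end_io().
 */
static struct workqueue_struct *xfs_end_io_wq;	/* hypothetical, created at init time */

STATIC void
xfs_finish_ioend_sketch(
	xfs_ioend_t	*ioend)
{
	/* Schedule the completion work only when the last reference drops. */
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfs_end_io_wq, &ioend->io_work);
}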
Example #7
/*
 * check a range of space and convert unwritten extents to written.
 */
int ext4_end_io_nolock(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	wait_queue_head_t *wq;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	if (list_empty(&io->list))
		return ret;

	if (!(io->flag & EXT4_IO_END_UNWRITTEN))
		return ret;

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		printk(KERN_EMERG "%s: failed to convert unwritten "
			"extents to written extents, error is %d "
			"io is still on inode %lu aio dio list\n",
		       __func__, ret, inode->i_ino);
		return ret;
	}

	if (io->iocb)
		aio_complete(io->iocb, io->result, 0);
	/* clear the DIO AIO unwritten flag */
	if (io->flag & EXT4_IO_END_UNWRITTEN) {
		io->flag &= ~EXT4_IO_END_UNWRITTEN;
		/* Wake up anyone waiting on unwritten extent conversion */
		wq = ext4_ioend_wq(io->inode);
		if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) &&
		    waitqueue_active(wq)) {
			wake_up_all(wq);
		}
	}

	return ret;
}
Example #8
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
		inode_dio_done(ioend->io_inode);
	}

	mempool_free(ioend, xfs_ioend_pool);
}
Example #9
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io_end->inode));

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	if (io_end->flag & EXT4_IO_END_DIRECT)
		inode_dio_done(io_end->inode);
	if (io_end->iocb)
		aio_complete(io_end->iocb, io_end->result, 0);
	kmem_cache_free(io_end_cachep, io_end);
}
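ext4_release_io_end() drops the per-inode i_ioend_count and returns the io_end to io_end_cachep, so its natural counterpart is an allocation helper that takes the reference and initializes the fields the release path expects. A minimal sketch of that counterpart, with the function name assumed for illustration:

/*
 * Illustrative sketch of the allocation counterpart to
 * ext4_release_io_end(): take the per-inode ioend reference that the
 * release path drops and initialize the fields it relies on.
 */
static ext4_io_end_t *ext4_alloc_io_end_sketch(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (!io_end)
		return NULL;
	atomic_inc(&EXT4_I(inode)->i_ioend_count);
	io_end->inode = inode;
	INIT_LIST_HEAD(&io_end->list);
	return io_end;
}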
Example #10
void async_poll_complete(void *data)
{
	async_poll_table *pasync = data;
	poll_table *p = data;
	struct kiocb	*iocb = p->iocb;
	unsigned int	mask;

	pasync->wake = 0;
	wmb();
	do {
		mask = iocb->filp->f_op->poll(iocb->filp, p);
		mask &= pasync->events | POLLERR | POLLHUP;
		if (mask) {
			poll_table *p2 = xchg(&iocb->data, NULL);
			if (p2) {
				poll_freewait(p2); 
				aio_complete(iocb, mask, 0);
			}
			return;
		}
		pasync->sync = 0;
		wmb();
	} while (pasync->wake);
}
Example #11
/*
 * "Complete" an asynchronous operation.
 */
static void char_device_do_deferred_op(void *p)
{
        struct async_work *stuff = (struct async_work *) p;
        aio_complete(stuff->iocb, stuff->result, 0);
        kfree(stuff);
}
Example #12
static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct file *fp;
	struct uio uio;
	struct iovec iov;
	size_t cnt;
	int error, flags;

	SOCKBUF_UNLOCK(sb);
	aio_switch_vmspace(job);
	td = curthread;
	fp = job->fd_file;
retry:
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	cnt = job->uaiocb.aio_nbytes;
	iov.iov_base = (void *)(uintptr_t)job->uaiocb.aio_buf;
	iov.iov_len = cnt;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = cnt;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_td = td;
	flags = MSG_NBIO;

	/* TODO: Charge ru_msg* to job. */

	if (sb == &so->so_rcv) {
		uio.uio_rw = UIO_READ;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)
#endif
			error = soreceive(so, NULL, &uio, NULL, NULL, &flags);
	} else {
		uio.uio_rw = UIO_WRITE;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, &uio, NULL, NULL, flags, td);
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	cnt -= uio.uio_resid;
	td->td_ucred = td_savedcred;

	/* XXX: Not sure if this is needed? */
	if (cnt != 0 && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK))
		error = 0;
	if (error == EWOULDBLOCK) {
		/*
		 * A read() or write() on the socket raced with this
		 * request.  If the socket is now ready, try again.
		 * If it is not, place this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		SOCKBUF_LOCK(sb);		
		empty_results++;
		if (soaio_ready(so, sb)) {
			empty_retries++;
			SOCKBUF_UNLOCK(sb);
			goto retry;
		}

		if (!aio_set_cancel_function(job, soo_aio_cancel)) {
			MPASS(cnt == 0);
			SOCKBUF_UNLOCK(sb);
			aio_cancel(job);
			SOCKBUF_LOCK(sb);
		} else {
			TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
		}
	} else {
		aio_complete(job, cnt, error);
		SOCKBUF_LOCK(sb);
	}
}
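The EWOULDBLOCK branch above decides between an immediate retry and re-queueing based on soaio_ready(). A plausible minimal definition of that predicate, assuming the standard soreadable()/sowriteable() socket macros, might look like this:

/*
 * Illustrative sketch: with the socket buffer lock held, report whether
 * the buffer the job is queued on is ready for another soreceive() or
 * sosend() attempt.  Assumes the standard readiness predicates.
 */
static bool
soaio_ready_sketch(struct socket *so, struct sockbuf *sb)
{
	return (sb == &so->so_rcv ? soreadable(so) : sowriteable(so));
}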
Example #13
/*
 * "Complete" an asynchronous operation.
 */
static void scullc_do_deferred_op(struct work_struct *work)
{
	/* The cast assumes work_struct is the first member of struct async_work. */
	struct async_work *stuff = (struct async_work *) work;
	aio_complete(stuff->iocb, stuff->result, 0);
	kfree(stuff);
}
Example #14
static void scullc_do_deferred_op(struct work_struct *work)
{
	struct async_work *stuff = container_of(work, struct async_work, dwork.work);
	aio_complete(stuff->iocb, stuff->result, 0);
	kfree(stuff);
}
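Both scullc variants complete the request from a work item carrying the kiocb and the result. The submission side of this pattern performs the operation, hands completion off to a delayed work item, and returns -EIOCBQUEUED. A minimal sketch follows; the struct layout is inferred from the container_of() usage above, and the function name, delay, and passing in an already-computed result are assumptions for illustration.

/*
 * Illustrative sketch of the submission side of the deferred-completion
 * pattern above.  The struct layout is inferred from
 * container_of(work, struct async_work, dwork.work); the delay value and
 * taking a precomputed result are assumptions.
 */
struct async_work {
	struct kiocb		*iocb;
	ssize_t			result;
	struct delayed_work	dwork;
};

static ssize_t defer_op_sketch(struct kiocb *iocb, ssize_t result)
{
	struct async_work *stuff;

	/* Synchronous iocbs are completed in place; nothing to defer. */
	if (is_sync_kiocb(iocb))
		return result;

	stuff = kmalloc(sizeof(*stuff), GFP_KERNEL);
	if (!stuff)
		return result;	/* fall back to synchronous completion */

	stuff->iocb = iocb;
	stuff->result = result;
	INIT_DELAYED_WORK(&stuff->dwork, scullc_do_deferred_op);
	schedule_delayed_work(&stuff->dwork, HZ / 100);
	return -EIOCBQUEUED;
}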
Example #15
static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct file *fp;
	struct uio uio;
	struct iovec iov;
	size_t cnt, done;
	long ru_before;
	int error, flags;

	SOCKBUF_UNLOCK(sb);
	aio_switch_vmspace(job);
	td = curthread;
	fp = job->fd_file;
retry:
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	done = job->aio_done;
	cnt = job->uaiocb.aio_nbytes - done;
	iov.iov_base = (void *)((uintptr_t)job->uaiocb.aio_buf + done);
	iov.iov_len = cnt;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = cnt;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_td = td;
	flags = MSG_NBIO;

	/*
	 * For resource usage accounting, only count a completed request
	 * as a single message to avoid counting multiple calls to
	 * sosend/soreceive on a blocking socket.
	 */

	if (sb == &so->so_rcv) {
		uio.uio_rw = UIO_READ;
		ru_before = td->td_ru.ru_msgrcv;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)
#endif
			error = soreceive(so, NULL, &uio, NULL, NULL, &flags);
		if (td->td_ru.ru_msgrcv != ru_before)
			job->msgrcv = 1;
	} else {
		uio.uio_rw = UIO_WRITE;
		ru_before = td->td_ru.ru_msgsnd;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, &uio, NULL, NULL, flags, td);
		if (td->td_ru.ru_msgsnd != ru_before)
			job->msgsnd = 1;
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	done += cnt - uio.uio_resid;
	job->aio_done = done;
	td->td_ucred = td_savedcred;

	if (error == EWOULDBLOCK) {
		/*
		 * The request was either partially completed or not
		 * completed at all due to racing with a read() or
		 * write() on the socket.  If the socket is
		 * non-blocking, return with any partial completion.
		 * If the socket is blocking or if no progress has
		 * been made, requeue this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		MPASS(done != job->uaiocb.aio_nbytes);
		SOCKBUF_LOCK(sb);
		if (done == 0 || !(so->so_state & SS_NBIO)) {
			empty_results++;
			if (soaio_ready(so, sb)) {
				empty_retries++;
				SOCKBUF_UNLOCK(sb);
				goto retry;
			}
			
			if (!aio_set_cancel_function(job, soo_aio_cancel)) {
				SOCKBUF_UNLOCK(sb);
				if (done != 0)
					aio_complete(job, done, 0);
				else
					aio_cancel(job);
				SOCKBUF_LOCK(sb);
			} else {
				TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
			}
			return;
		}
		SOCKBUF_UNLOCK(sb);
	}		
	if (done != 0 && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK))
		error = 0;
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, done, 0);
	SOCKBUF_LOCK(sb);
}