Example #1
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
		struct lib_ring_buffer *buf)
{
	unsigned int mask = 0;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	int finalized, disabled;

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &buf->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains an smp_rmb() that
		 * orders the load of "finalized" before the offset loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
		if (disabled)
			return POLLERR;

		if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
		  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
		  == 0) {
			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers in __wait_event() and
				 * wake_up_interruptible() take care of the
				 * memory ordering for raw_spin_is_locked().
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
					 chan)
			  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
					 chan)
			  >= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}
	return mask;
}
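
lib_ring_buffer_poll() takes the ring buffer as an explicit third argument, so the VFS layer needs a thin wrapper that recovers the buffer from the file's private_data, exactly like the vfs_lib_ring_buffer_splice_read() wrapper at the end of Example #2. A minimal sketch, assuming the wrapper name vfs_lib_ring_buffer_poll (not shown in this excerpt):

/*
 * Hypothetical VFS-level wrapper, mirroring vfs_lib_ring_buffer_splice_read
 * in Example #2: recover the ring buffer from the file's private_data and
 * forward to lib_ring_buffer_poll().
 */
unsigned int vfs_lib_ring_buffer_poll(struct file *filp, poll_table *wait)
{
	struct lib_ring_buffer *buf = filp->private_data;

	return lib_ring_buffer_poll(filp, wait, buf);
}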
Example #2
/*
 *	subbuf_splice_actor - splice up to one subbuf's worth of data
 */
static int subbuf_splice_actor(struct file *in,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags,
			       struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned int poff, subbuf_pages, nr_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.nr_pages = 0,
		.partial = partial,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
		.flags = flags,
#endif
		.ops = &ring_buffer_pipe_buf_ops,
		.spd_release = lib_ring_buffer_page_release,
	};
	unsigned long consumed_old, roffset;
	unsigned long bytes_avail;

	/*
	 * Check that a GET_SUBBUF ioctl has been done before.
	 */
	WARN_ON(atomic_long_read(&buf->active_readers) != 1);
	consumed_old = lib_ring_buffer_get_consumed(config, buf);
	consumed_old += *ppos;

	/*
	 * Adjust the read length if it is longer than what is available.
	 * The maximum read size is one subbuffer, because reads are
	 * protected by the get_subbuf/put_subbuf pair.
	 */
	bytes_avail = chan->backend.subbuf_size;
	WARN_ON(bytes_avail > chan->backend.buf_size);
	len = min_t(size_t, len, bytes_avail);
	subbuf_pages = bytes_avail >> PAGE_SHIFT;
	nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
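	/*
	 * Split the consumed position into a page-aligned offset into the
	 * buffer (roffset) and the byte offset within that first page (poff).
	 */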
	roffset = consumed_old & PAGE_MASK;
	poff = consumed_old & ~PAGE_MASK;
	printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
		   len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));

	for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
		unsigned int this_len;
		unsigned long *pfnp, new_pfn;
		struct page *new_page;
		void **virt;

		if (!len)
			break;
		printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
			   len, roffset);

		/*
		 * Replace the page we are about to move into the splice
		 * pipe with a freshly allocated one, so the ring buffer
		 * keeps owning a valid page at this position.
		 */
		new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
							    0)),
					    GFP_KERNEL | __GFP_ZERO, 0);
		if (!new_page)
			break;
		new_pfn = page_to_pfn(new_page);
		this_len = PAGE_SIZE - poff;
		pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, roffset, &virt);
		spd.pages[spd.nr_pages] = pfn_to_page(*pfnp);
		*pfnp = new_pfn;
		*virt = page_address(new_page);
		spd.partial[spd.nr_pages].offset = poff;
		spd.partial[spd.nr_pages].len = this_len;

		poff = 0;
		roffset += PAGE_SIZE;
		len -= this_len;
	}

	if (!spd.nr_pages)
		return 0;

	return wrapper_splice_to_pipe(pipe, &spd);
}

ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe, size_t len,
				    unsigned int flags,
				    struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	ssize_t spliced;
	int ret;

	if (config->output != RING_BUFFER_SPLICE)
		return -EINVAL;

	/*
	 * We require ppos and len to be page-aligned for performance reasons
	 * (no page copy). The size is known through the
	 * RING_BUFFER_GET_PADDED_SUBBUF_SIZE ioctl, which returns a
	 * page-size-padded value. We fail when the ppos or len passed in is
	 * not page-aligned, because splice is not allowed to copy more than
	 * the length passed as a parameter (so the ABI does not let us
	 * silently copy more than requested to include the padding).
	 */
	if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
		return -EINVAL;

	ret = 0;
	spliced = 0;

	printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
		   (ssize_t)*ppos);
	while (len && !spliced) {
		ret = subbuf_splice_actor(in, ppos, pipe, len, flags, buf);
		printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
		if (ret < 0)
			break;
		else if (!ret) {
			if (flags & SPLICE_F_NONBLOCK)
				ret = -EAGAIN;
			break;
		}

		*ppos += ret;
		if (ret > len)
			len = 0;
		else
			len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
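
The alignment comment above describes the user-visible protocol: reserve a subbuffer, query its padded (page-aligned) size, then splice exactly that many bytes. Below is a hedged user-space sketch of one consumer iteration; the RING_BUFFER_GET_SUBBUF, RING_BUFFER_GET_PADDED_SUBBUF_SIZE and RING_BUFFER_PUT_SUBBUF ioctl names come from the comments above, but the header that provides them and the exact argument conventions are assumptions here:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
/* RING_BUFFER_* ioctl macros: assumed to come from the ring buffer ABI
 * header, which is not part of this excerpt. */

/* Hedged sketch: drain one padded subbuffer from stream_fd into pipe_wr. */
static int drain_one_subbuf(int stream_fd, int pipe_wr)
{
	unsigned long padded_len;
	loff_t off = 0;	/* assumed: splice offset starts page-aligned at 0 */
	ssize_t ret = -1;

	if (ioctl(stream_fd, RING_BUFFER_GET_SUBBUF, &off) < 0)
		return -1;	/* nothing to read, or reader busy */
	if (ioctl(stream_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE,
		  &padded_len) < 0)
		goto put;
	/* padded_len is page-size padded, so the PAGE_ALIGN() checks in
	 * lib_ring_buffer_splice_read() are satisfied. */
	ret = splice(stream_fd, &off, pipe_wr, NULL, padded_len,
		     SPLICE_F_MOVE);
put:
	ioctl(stream_fd, RING_BUFFER_PUT_SUBBUF);
	return ret < 0 ? -1 : 0;
}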

ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe, size_t len,
				    unsigned int flags)
{
	struct lib_ring_buffer *buf = in->private_data;

	return lib_ring_buffer_splice_read(in, ppos, pipe, len, flags, buf);
}
EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_splice_read);
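
Finally, the exported entry points are meant to be wired into a struct file_operations for the per-stream file. A minimal sketch; vfs_lib_ring_buffer_poll is the wrapper assumed in Example #1, and only the two operations shown in this excerpt are listed:

static const struct file_operations lib_ring_buffer_fops = {
	.owner = THIS_MODULE,
	.poll = vfs_lib_ring_buffer_poll,	/* assumed wrapper, Example #1 */
	.splice_read = vfs_lib_ring_buffer_splice_read,
};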