/**
 * lib_ring_buffer_poll - implements poll() for the ring buffer
 * @filp:	the file
 * @wait:	poll table
 * @buf:	ring buffer
 *
 * Reports POLLIN | POLLRDNORM when data is available for reading,
 * POLLPRI | POLLRDBAND when the buffer is full, POLLHUP when the buffer
 * is finalized and empty, and POLLERR when the channel is disabled.
 */
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
		struct lib_ring_buffer *buf)
{
	unsigned int mask = 0;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	int finalized, disabled;

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &buf->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb(), which
		 * orders the load of "finalized" before the offset loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
		if (disabled)
			return POLLERR;

		if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
		  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
		  == 0) {
			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers in
				 * __wait_event()/wake_up_interruptible()
				 * take care of the "raw_spin_is_locked"
				 * memory ordering.
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
					 chan)
			  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
					 chan)
			  >= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}
	return mask;
}
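
/**
 * lib_ring_buffer_compat_ioctl - 32-bit compat ring buffer reader control
 * @filp:	the file
 * @cmd:	the command
 * @arg:	command arg
 * @buf:	ring buffer
 *
 * 32-bit compatibility counterpart of lib_ring_buffer_ioctl(). It handles
 * the same commands, but checks sizes and offsets returned to user space
 * against UINT_MAX, and recombines the 32-bit consumed position passed in
 * by user space with the upper bits of the last consumed snapshot.
 */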
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_COMPAT_SNAPSHOT:
		/*
		 * First, ensure we perform a "final" flush onto the
		 * stream. This creates a packet of padding if we
		 * encounter an empty packet, so that the time-stamp
		 * taken right before the snapshot is used as the
		 * end-of-packet timestamp.
		 */
		if (!buf->quiescent)
			lib_ring_buffer_switch_remote_empty(buf);
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
						&buf->prod_snapshot);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED:
		return compat_put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED:
		return compat_put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_COMPAT_GET_SUBBUF:
	{
		__u32 uconsume;
		unsigned long consume;
		long ret;

		ret = get_user(uconsume, (__u32 __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		/*
		 * Compat user space only passes the low 32 bits of the
		 * consumed count; recombine them with the upper bits of
		 * the last consumed snapshot.
		 */
		consume = buf->cons_snapshot;
		consume &= ~0xFFFFFFFFL;
		consume |= uconsume;
		ret = lib_ring_buffer_get_subbuf(buf, consume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE:
	{
		unsigned long data_size;

		data_size = lib_ring_buffer_get_read_data_size(config, buf);
		if (data_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(data_size, arg);
	}
	case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		if (size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(size, arg);
	}
	case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE:
		if (chan->backend.subbuf_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_COMPAT_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex, read_offset;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		read_offset = buf->backend.array[sb_bindex]->mmap_offset;
		if (read_offset > UINT_MAX)
			return -EINVAL;
		return compat_put_ulong(read_offset, arg);
	}
	case RING_BUFFER_COMPAT_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
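
/**
 * lib_ring_buffer_ioctl - control ring buffer reader synchronization
 * @filp:	the file
 * @cmd:	the command
 * @arg:	command arg
 * @buf:	ring buffer
 *
 * Implements the commands needed for producer/consumer and flight recorder
 * reader interaction: snapshotting the consumed and produced positions,
 * getting and putting sub-buffers for reading, querying sub-buffer and mmap
 * sizes and offsets, and flushing the current sub-buffer.
 */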
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		/*
		 * As in the compat case above, ensure we perform a
		 * "final" flush onto the stream, so that the time-stamp
		 * taken right before the snapshot is used as the
		 * end-of-packet timestamp even for an empty packet.
		 */
		if (!buf->quiescent)
			lib_ring_buffer_switch_remote_empty(buf);
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
						&buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		unsigned long uconsume;
		long ret;

		ret = get_user(uconsume, (unsigned long __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		ret = lib_ring_buffer_get_subbuf(buf, uconsume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
		return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
				 arg);
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		return put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		return put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > INT_MAX)
			return -EFBIG;
		return put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
				 arg);
	}
	case RING_BUFFER_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
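
/*
 * Example (sketch only, not compiled here): how a user-space consumer
 * might drive the ioctls above in mmap output mode.  The stream path,
 * the way the RING_BUFFER_* request numbers reach user space, and the
 * minimal error handling are assumptions for illustration; the tracer
 * normally wraps these operations in its own user-visible ABI rather
 * than exposing them directly like this.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static int consume_stream(const char *path)
 *	{
 *		unsigned long mmap_len, padded_len, read_off;
 *		struct pollfd pfd;
 *		char *base;
 *		int fd;
 *
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, RING_BUFFER_GET_MMAP_LEN, &mmap_len) < 0)
 *			goto err_close;
 *		base = mmap(NULL, mmap_len, PROT_READ, MAP_PRIVATE, fd, 0);
 *		if (base == MAP_FAILED)
 *			goto err_close;
 *
 *		for (;;) {
 *			pfd.fd = fd;
 *			pfd.events = POLLIN;
 *			if (poll(&pfd, 1, -1) < 0)
 *				break;
 *			if (pfd.revents & (POLLERR | POLLHUP))
 *				break;		// disabled, or finalized and empty
 *			if (ioctl(fd, RING_BUFFER_GET_NEXT_SUBBUF) < 0)
 *				continue;	// nothing readable right now
 *			ioctl(fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE,
 *			      &padded_len);
 *			ioctl(fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &read_off);
 *			// Consume the reader-owned sub-buffer, then release it.
 *			fwrite(base + read_off, 1, padded_len, stdout);
 *			ioctl(fd, RING_BUFFER_PUT_NEXT_SUBBUF);
 *		}
 *		munmap(base, mmap_len);
 *		close(fd);
 *		return 0;
 *
 *	err_close:
 *		close(fd);
 *		return -1;
 *	}
 */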