/**
 *	lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
					      unsigned long action,
					      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "lib_ring_buffer_cpu_hp_callback: cpu %u buffer creation failed\n",
			  cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here: it will happen when
		 * tracing is stopped, or will be done by the switch timer
		 * CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}
/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when a CPU goes away, because that would make
 * trace data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
		  "lttng_cpuhp_rb_backend_prepare: cpu %u buffer creation failed\n",
		  cpu);
		return ret;
	}
	return 0;
}
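
/*
 * A minimal sketch, not part of this file, of how the multi-instance
 * hotplug state behind lttng_cpuhp_rb_backend_prepare could be set up.
 * "lttng_rb_hp_prepare" matches the cpuhp_state_add_instance() call in
 * channel_backend_init() below; the dispatcher and the state name are
 * assumptions for illustration only.
 */
#if 0	/* illustration only; assumes a >= 4.10 kernel */
enum cpuhp_state lttng_rb_hp_prepare;

static
int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct lttng_cpuhp_node *lttng_node;

	lttng_node = container_of(node, struct lttng_cpuhp_node, node);
	switch (lttng_node->component) {
	case LTTNG_RING_BUFFER_BACKEND:
		return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
	default:
		return -EINVAL;
	}
}

static
int lttng_rb_hotplug_init(void)
{
	int ret;

	/* Request a dynamically allocated BP_PREPARE state slot. */
	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN,
			"lib/ringbuffer:prepare",
			lttng_hotplug_prepare, NULL);
	if (ret < 0)
		return ret;
	lttng_rb_hp_prepare = ret;
	return 0;
}
#endif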
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @subbuf_size: size of sub-buffers (>= page size, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 * @stream_fds: stream file descriptors.
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified.  The created channel buffer files will be named
 * name_0...name_N-1.  File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle,
			 const int *stream_fds)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;
	size_t shmsize = 0, num_subbuf_alloc;
	long page_size;

	if (!name)
		return -EPERM;

	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size <= 0)
		return -ENOMEM;
	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < page_size)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2, and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
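	/*
	 * For nonzero x, (x & (x - 1)) == 0 exactly when x is a power
	 * of 2: subtracting 1 clears the lowest set bit and sets every
	 * bit below it, e.g. 64 & 63 == 0, while 48 & 47 == 32.
	 */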
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
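	/*
	 * get_count_order() returns ceil(log2(x)); the orders computed
	 * here allow shift/mask arithmetic later instead of divisions.
	 */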
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);

	/* Per-cpu buffer size: backend */
	/*
	 * num_subbuf + 1 is the worst case: one extra sub-buffer is
	 * reserved for the reader.
	 */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, page_size);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	/* Per-cpu buffer size: control (after backend) */
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
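	/*
	 * offset_align(offset, align) evaluates to the padding needed to
	 * round "offset" up to the next multiple of "align" (a power of
	 * 2); e.g. offset_align(13, 8) == 3, since 13 + 3 == 16.  Each
	 * "shmsize += offset_align(...)" above therefore inserts padding,
	 * and the "shmsize += sizeof(...) * n" that follows reserves the
	 * array itself, so shmsize mirrors the allocations later carved
	 * out of the same shm object (see zalloc_shm() below).
	 */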

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;
		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;

			shmobj = shm_object_table_alloc(handle->table, shmsize,
					SHM_OBJECT_SHM, stream_fds[i]);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
					handle, shmobj);
			if (ret)
				goto free_bufs;
		}
	} else {
		struct shm_object *shmobj;
		struct lttng_ust_lib_ring_buffer *buf;

		/*
		 * Global buffer: mirrors the per-cpu allocation above,
		 * using stream_fds[0] and a cpu index of -1.
		 */
		shmobj = shm_object_table_alloc(handle->table, shmsize,
				SHM_OBJECT_SHM, stream_fds[0]);
		if (!shmobj)
			goto end;
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer)));
		buf = shmp(handle, chanb->buf[0].shmp);
		if (!buf)
			goto end;
		set_shmp(buf->self, chanb->buf[0].shmp._ref);
		ret = lib_ring_buffer_create(buf, chanb, -1, handle, shmobj);
		if (ret)
			goto free_bufs;
	}
	chanb->allocated = 1;
	return 0;

free_bufs:
	/* We only free the buffer data upon shm teardown. */
end:
	return -ENOMEM;
}
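
/*
 * A minimal usage sketch, not from this file: a frontend channel
 * creation routine would allocate the shm handle and stream file
 * descriptors, then initialize the embedded backend roughly as below.
 * Every name except channel_backend_init() is an assumption here.
 */
#if 0	/* illustration only */
	struct channel *chan;	/* assumed to be carved out of "handle" */
	int ret;

	ret = channel_backend_init(&chan->backend, name, config,
			subbuf_size, num_subbuf, handle, stream_fds);
	if (ret)
		goto error_backend_init;	/* hypothetical cleanup label */
#endif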
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified.  The created channel buffer files will be named
 * name_0...name_N-1.  File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;

		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * Without CPU hotplug support, a ring buffer
			 * allocated from an early initcall is never
			 * notified of secondary CPUs being brought up;
			 * in that case, allocate for all possible CPUs.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							 chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							 chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {