Code example #1: lib_ring_buffer_backend_reset(), kernel-side variant. It resets the write-side and read-side sub-buffer ids and the per-sub-buffer record counters of a buffer backend in place, without touching the allocated backend pages.
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
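The reset above rebuilds every write-side sub-buffer id via subbuffer_id(config, 0, 1, i), i.e. what appears to be an offset of 0, the noref flag set, and the sub-buffer index. As a rough illustration only, the sketch below shows a hypothetical id-packing helper that combines an index with a noref bit; the names and bit layout are invented for the sketch and do not match LTTng's actual encoding, which also carries an offset count.

/*
 * Hypothetical sketch of an id-packing helper in the spirit of
 * subbuffer_id(); not LTTng's real bit layout.
 */
#include <assert.h>
#include <stdio.h>

#define SB_SKETCH_NOREF_SHIFT	31
#define SB_SKETCH_NOREF_MASK	(1UL << SB_SKETCH_NOREF_SHIFT)
#define SB_SKETCH_INDEX_MASK	(SB_SKETCH_NOREF_MASK - 1)

static unsigned long sketch_subbuffer_id(unsigned long noref, unsigned long index)
{
	assert(index <= SB_SKETCH_INDEX_MASK);
	return (noref ? SB_SKETCH_NOREF_MASK : 0) | index;
}

static unsigned long sketch_subbuffer_id_get_index(unsigned long id)
{
	return id & SB_SKETCH_INDEX_MASK;
}

int main(void)
{
	unsigned long id = sketch_subbuffer_id(1, 3);

	printf("index=%lu noref=%lu\n",
	       sketch_subbuffer_id_get_index(id),
	       (id & SB_SKETCH_NOREF_MASK) ? 1UL : 0UL);
	return 0;
}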
Code example #2: lib_ring_buffer_backend_reset(), user-space (lttng-ust) variant. It performs the same reset, but the buffer lives in shared memory, so every reference must first be translated through the shm handle with shmp()/shmp_index().
void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		abort();
	config = &chanb->config;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			abort();
		sb->id = subbuffer_id(config, 0, 1, i);
	}
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			abort();
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			abort();
		/* Don't reset mmap_offset */
		v_set(config, &pages->records_commit, 0);
		v_set(config, &pages->records_unread, 0);
		pages->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
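The user-space variant above cannot keep raw pointers in the shared buffer, since the mapping address differs between the processes that attach it; that is why each access first resolves a reference through shmp()/shmp_index() with the shm handle. The sketch below illustrates the general idea of offset-based references with invented names; it is not the lttng-ust shm API.

/* Hypothetical sketch: offset-based references into a shared mapping. */
#include <stddef.h>
#include <stdio.h>

struct sketch_shm_handle {
	char *base;	/* where this process mapped the shared memory */
	size_t len;	/* length of the mapping */
};

struct sketch_shm_ref {
	size_t offset;	/* position of the object within the mapping */
};

/* Resolve a reference to a local pointer; NULL if it falls outside the map. */
static void *sketch_shmp(const struct sketch_shm_handle *handle,
			 struct sketch_shm_ref ref, size_t obj_size)
{
	if (ref.offset > handle->len || obj_size > handle->len - ref.offset)
		return NULL;
	return handle->base + ref.offset;
}

int main(void)
{
	char backing[64] = "hello";
	struct sketch_shm_handle handle = { .base = backing, .len = sizeof(backing) };
	struct sketch_shm_ref ref = { .offset = 0 };

	printf("%s\n", (char *) sketch_shmp(&handle, ref, 6));
	return 0;
}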
Code example #3: lib_ring_buffer_backend_allocate(), user-space (lttng-ust) variant. It lays out the backend pages array, the sub-buffer memory map, the write-side sub-buffer table and the packet counter table inside a single shared-memory object.
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 * @handle: shared memory handle used to translate references into the mapping
 * @shmobj: shared memory object from which the backend tables and pages are carved
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;
	long page_size;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb)
		num_subbuf_alloc++;

	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size <= 0) {
		goto page_size_error;
	}

	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on page size.
	 */
	align_shm(shmobj, page_size);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
				* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;

	for (i = 0; i < num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			goto free_array;
		sb->id = subbuffer_id(config, 0, 1, i);
	}

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
				* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			goto free_array;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			goto free_array;
		set_shmp(pages->p, ref);
		if (config->output == RING_BUFFER_MMAP) {
			pages->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
	return 0;

free_wsb:
	/* bufb->buf_wsb will be freed by shm teardown */
free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
page_size_error:
	return -ENOMEM;
}
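Note that nothing here goes through malloc(): the allocation appears to be a simple aligned carve-out from one preallocated shared-memory object, where align_shm() rounds the allocation cursor up to the requested alignment and zalloc_shm() hands out the next zero-filled chunk, which is also why the error labels can simply rely on shm teardown to release everything at once. Below is a hypothetical sketch of such an aligned bump allocator, with invented names rather than the real helpers.

/* Hypothetical sketch of an aligned bump allocator over one memory block. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct sketch_shm_object {
	char *memory;		/* backing memory of the object */
	size_t allocated_len;	/* allocation cursor */
	size_t memory_size;	/* total size of the backing memory */
};

/* Round the allocation cursor up to 'align' (must be a power of two). */
static void sketch_align(struct sketch_shm_object *obj, size_t align)
{
	obj->allocated_len = (obj->allocated_len + align - 1) & ~(align - 1);
}

/* Hand out the next zero-filled chunk, or NULL if the object is full. */
static void *sketch_zalloc(struct sketch_shm_object *obj, size_t len)
{
	void *p;

	if (len > obj->memory_size - obj->allocated_len)
		return NULL;
	p = obj->memory + obj->allocated_len;
	obj->allocated_len += len;
	memset(p, 0, len);
	return p;
}

int main(void)
{
	static char backing[4096];
	struct sketch_shm_object obj = {
		.memory = backing,
		.allocated_len = 0,
		.memory_size = sizeof(backing),
	};
	char *chunk;

	sketch_align(&obj, 64);
	chunk = sketch_zalloc(&obj, 128);
	printf("chunk at offset %td\n", chunk - backing);
	return 0;
}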
Code example #4: lib_ring_buffer_backend_allocate(), kernel-side variant. It allocates the backend pages with alloc_pages_node(), builds the same tables with node-local allocations, and wires each page's virtual address and PFN into the per-sub-buffer page index.
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}
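The kernel-side variant sizes everything up front: size is split into pages, each sub-buffer receives num_pages >> get_count_order(num_subbuf) of them, and one extra sub-buffer's worth of pages is added when a reader sub-buffer is requested. The short program below just replays that arithmetic with assumed values (4 KiB pages, power-of-two num_subbuf); sketch_count_order() is only a stand-in for get_count_order().

/* Hypothetical illustration of the allocator's sizing arithmetic. */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12	/* assume 4 KiB pages */

/* ceil(log2(v)); plays the role get_count_order() has in the code above. */
static unsigned int sketch_count_order(unsigned long v)
{
	unsigned int order = 0;

	while ((1UL << order) < v)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 1UL << 20;			/* 1 MiB buffer */
	unsigned long num_subbuf = 4;			/* power of two */
	int extra_reader_sb = 1;
	unsigned long num_pages = size >> SKETCH_PAGE_SHIFT;
	unsigned long num_pages_per_subbuf =
		num_pages >> sketch_count_order(num_subbuf);
	unsigned long num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf;	/* pages for the reader */
		num_subbuf_alloc++;
	}

	printf("num_pages=%lu pages/subbuf=%lu subbuf_alloc=%lu\n",
	       num_pages, num_pages_per_subbuf, num_subbuf_alloc);
	return 0;
}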