Code example #1
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}
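
All of these call sites free memory that may have come from either the slab allocator or vmalloc, which is why they go through lttng_kvfree instead of calling kfree or vfree directly. As a rough sketch (an assumption for illustration, not the verbatim lttng-modules wrapper, which also propagates the caller's gfp mask and NUMA node on the fallback path), such a kmalloc/vmalloc fallback pair typically looks like this:

#include <linux/mm.h>        /* is_vmalloc_addr() */
#include <linux/slab.h>      /* kmalloc(), kfree() */
#include <linux/vmalloc.h>   /* vmalloc(), vfree() */

/*
 * Sketch of a kmalloc-with-vmalloc-fallback allocator: try the slab
 * first without triggering warnings or retry loops, and fall back to
 * vmalloc for large or fragmented requests.
 */
static inline void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	void *ret;

	ret = kmalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);
	if (!ret)
		ret = vmalloc(size);	/* gfp flags dropped here for brevity */
	return ret;
}

/*
 * Free memory from either allocator: is_vmalloc_addr() tells the two
 * address ranges apart, so callers need not remember which path the
 * allocation took.
 */
static inline void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}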
Code example #2
File: lttng_prio_heap.c  Project: lttng/lttng-modules
/*
 * Copy of heap->ptrs pointer is invalid after heap_grow.
 */
static
int heap_grow(struct lttng_ptr_heap *heap, size_t new_len)
{
	void **new_ptrs;

	if (heap->alloc_len >= new_len)
		return 0;

	heap->alloc_len = max_t(size_t, new_len, heap->alloc_len << 1);
	new_ptrs = lttng_kvmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
	if (!new_ptrs)
		return -ENOMEM;
	if (heap->ptrs)
		memcpy(new_ptrs, heap->ptrs, heap->len * sizeof(void *));
	lttng_kvfree(heap->ptrs);
	heap->ptrs = new_ptrs;
	return 0;
}
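
The comment above heap_grow matters for callers: growing may replace the backing array and free the old one with lttng_kvfree, so any previously saved copy of heap->ptrs is stale afterwards. A hypothetical caller sketch (heap_append is not part of lttng-modules; it only illustrates the intended call pattern):

/*
 * Hypothetical helper, for illustration only: make room for one more
 * element, then store it.  heap->ptrs is re-read after heap_grow()
 * rather than cached across the call, because the grow path may have
 * reallocated the array and freed the old pointer.
 */
static int heap_append(struct lttng_ptr_heap *heap, void *p)
{
	int ret;

	ret = heap_grow(heap, heap->len + 1);
	if (ret)
		return ret;
	heap->ptrs[heap->len++] = p;
	return 0;
}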
Code example #3
File: lttng_prio_heap.c  Project: lttng/lttng-modules
void lttng_heap_free(struct lttng_ptr_heap *heap)
{
	lttng_kvfree(heap->ptrs);
}
Code example #4
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}