static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	/* percpu-km backs a whole chunk with one physically contiguous allocation */
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	unsigned long flags;
	int i;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	pages = alloc_pages(gfp, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	/* record the owning chunk in each page for address -> chunk lookups */
	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];

	/* every page is backed up front, so mark the whole chunk populated */
	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_populated(chunk, 0, nr_pages, false);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}
static struct pcpu_chunk *pcpu_create_chunk(void)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	int i;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_populated(chunk, 0, nr_pages);
	spin_unlock_irq(&pcpu_lock);

	return chunk;
}
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (chunk && chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}
static struct pcpu_chunk *pcpu_create_chunk(void)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	/*
	 * percpu-vm only reserves vmalloc address space here; pages are
	 * allocated and mapped later when areas of the chunk are populated.
	 */
	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
	return chunk;
}
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (chunk && chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}
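/*
 * Context (an assumption based on the mainline tree): the two families of
 * pcpu_create_chunk()/pcpu_destroy_chunk() above correspond to the
 * page-backed (mm/percpu-km.c) and vmalloc-backed (mm/percpu-vm.c) chunk
 * backends of the percpu allocator.  mm/percpu.c builds exactly one of
 * them, roughly as sketched here:
 */
#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"		/* chunks are physically contiguous pages */
#else
#include "percpu-vm.c"		/* chunks live in vmalloc space, populated on demand */
#endif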