static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		return;

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
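The quick-path check at the top of this function relies on pcpu_next_unpop() to locate the first unpopulated region in [page_start, page_end); if that region spans the whole range, nothing is populated and there is nothing to do. A minimal userspace sketch of such a bitmap scan (hypothetical helper and array names, not the kernel implementation):

#include <stdbool.h>

#define UNIT_PAGES 64			/* stand-in for pcpu_unit_pages */

static bool populated[UNIT_PAGES];	/* models chunk->populated */

/*
 * Advance *rs to the first unpopulated page at or after its current
 * value, and *re to the end of that unpopulated run, both clamped to
 * @end. This mirrors what pcpu_next_unpop() computes from the bitmap.
 */
static void next_unpop(int *rs, int *re, int end)
{
	while (*rs < end && populated[*rs])
		(*rs)++;
	*re = *rs;
	while (*re < end && !populated[*re])
		(*re)++;
}

With rs preset to page_start, getting back rs == page_start and re == page_end means the whole range is already unpopulated, which is exactly the early-return condition above.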
/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int nid;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			/* original call: alloc_pages_node(cpu_to_node(cpu), gfp, 0) */
			nid = cpu_to_node(cpu);

			/*
			 * Fall back to the local node when the cpu has no
			 * node assigned or its node has no memory.
			 */
			if (nid == NUMA_NO_NODE ||
			    !node_zonelist(nid, GFP_KERNEL)->_zonerefs->zone)
				nid = numa_node_id();

			*pagep = alloc_pages_node(nid, gfp, 0);

			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
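Note how @pages is shared by all units: pcpu_page_idx() flattens a (cpu, page) pair into a single slot. A toy sketch of that flattening with illustrative sizes, assuming each cpu maps directly to its own unit (the kernel actually indirects through pcpu_unit_map[]):

#include <stdio.h>

#define UNIT_PAGES	8	/* stand-in for pcpu_unit_pages */

/*
 * Flatten a (cpu, page-within-unit) pair into one index into the
 * shared pages[] array: cpu's unit first, page offset within it.
 */
static int page_idx(unsigned int cpu, int page)
{
	return cpu * UNIT_PAGES + page;
}

int main(void)
{
	/* cpu 2, page 5 lands in slot 21 of the shared array */
	printf("%d\n", page_idx(2, 5));
	return 0;
}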
Example #3
/*! 2016-04-02 studying */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			/*! Find the slot in pages that corresponds to the
			 *  pcpu page index (pages is an array of pointers).
			 */
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			/*! Allocate a page on the cpu's node into that slot */
			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				/*! On allocation failure, free what was allocated */
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
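The error path above is all-or-nothing: the first failed allocation rolls back everything obtained so far and returns -ENOMEM. The same unwind pattern in a generic userspace sketch (plain malloc/free standing in for the pcpu helpers):

#include <stdlib.h>

/*
 * Allocate @n buffers of @size bytes each; on any failure, free every
 * buffer allocated so far and report failure, leaving no leaks.
 */
static int alloc_all(void **bufs, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i]) {
			while (i-- > 0)	/* unwind earlier allocations */
				free(bufs[i]);
			return -1;
		}
	}
	return 0;
}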
Example #4
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

	if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}
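The function stages its work: allocate backing pages, map them, then flush. Each failure only has to undo the stages that already succeeded, which is why a map failure frees the pages while an allocation failure returns immediately. A sketch of that unwind shape with hypothetical stage helpers standing in for the pcpu_* calls:

#include <errno.h>

static int alloc_backing(void)	 { return 0; }	/* pcpu_alloc_pages() */
static int map_backing(void)	 { return 0; }	/* pcpu_map_pages() */
static void free_backing(void)	 { }		/* pcpu_free_pages() */
static void flush_mappings(void) { }		/* pcpu_post_map_flush() */

/* Each stage unwinds only the stages that already succeeded. */
static int populate_range(void)
{
	if (alloc_backing())
		return -ENOMEM;		/* nothing to unwind yet */

	if (map_backing()) {
		free_backing();		/* undo the completed stage */
		return -ENOMEM;
	}

	flush_mappings();		/* mappings now usable everywhere */
	return 0;
}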
Example #5
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_free_pages(chunk, pages, page_start, page_end);
}
Example #6
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
Example #7
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap the pages covering
 * [@off, @off + @size) from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		return;

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
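Also worth noting: the function mutates only the temporary @populated copy while unmapping and freeing, and publishes the result with a single bitmap_copy() at the end, so chunk->populated never exposes a half-finished depopulation. A userspace sketch of that copy-modify-commit discipline (plain bool arrays standing in for the kernel bitmap API):

#include <stdbool.h>
#include <string.h>

#define UNIT_PAGES 64	/* stand-in for pcpu_unit_pages */

static bool chunk_populated[UNIT_PAGES];	/* the published state */

/* Clear [start, end) in a private copy, then commit it in one step. */
static void depopulate(int start, int end)
{
	bool scratch[UNIT_PAGES];
	int i;

	/* work on a private copy of the published bitmap */
	memcpy(scratch, chunk_populated, sizeof(scratch));

	for (i = start; i < end; i++)
		scratch[i] = false;	/* unmap/free happens per region here */

	/* commit: readers never see a half-updated bitmap */
	memcpy(chunk_populated, scratch, sizeof(chunk_populated));
}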