Example #1
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		return;

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
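The iteration above relies on the chunk's populated bitmap: pcpu_next_unpop() looks for the next unpopulated run to decide whether there is anything to do at all, and pcpu_for_each_pop_region() walks every contiguous populated page range inside [page_start, page_end). The following self-contained userspace sketch only illustrates that bitmap-region walk; the helper names and the bit layout are illustrative, not the kernel's.

/*
 * Userspace model of the populated-bitmap region walk.  next_pop_region()
 * plays the role the pcpu_next_pop()/pcpu_for_each_pop_region() pair plays
 * above: yield each contiguous populated range [rs, re) within [0, end).
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 16

static bool test_bit(const unsigned long *bitmap, int bit)
{
	return bitmap[bit / (8 * sizeof(unsigned long))] &
	       (1UL << (bit % (8 * sizeof(unsigned long))));
}

/* Find the next populated region [*rs, *re) at or after *rs, below end. */
static void next_pop_region(const unsigned long *populated, int *rs, int *re,
			    int end)
{
	while (*rs < end && !test_bit(populated, *rs))
		(*rs)++;
	*re = *rs;
	while (*re < end && test_bit(populated, *re))
		(*re)++;
}

int main(void)
{
	/* pages 1-3 and 6-7 populated: bit pattern 0b11001110 */
	unsigned long populated[1] = { 0xce };
	int rs, re;

	for (rs = 0, next_pop_region(populated, &rs, &re, NR_PAGES);
	     rs < re;
	     rs = re + 1, next_pop_region(populated, &rs, &re, NR_PAGES))
		printf("populated region: [%d, %d)\n", rs, re);

	return 0;
}

Running it prints [1, 4) and [6, 8), i.e. exactly the ranges the unmap and free passes would visit for that bitmap.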
Example #2
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_free_pages(chunk, pages, page_start, page_end);
}
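Unlike Examples #1 and #3, which accept a byte range (@off, @size) and derive the page range themselves via PFN_DOWN()/PFN_UP(), this variant expects the caller to pass [@page_start, @page_end) directly. A minimal userspace sketch of that conversion, assuming 4 KiB pages and redefining the PFN helpers locally so it compiles outside the kernel:

/*
 * How a byte range (off, size) maps to the page range [page_start,
 * page_end) that Example #2 takes directly.  PAGE_SHIFT and the PFN
 * macros are local stand-ins for the kernel's definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* round down */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* round up */

int main(void)
{
	unsigned long off = 5000, size = 10000;	/* arbitrary byte range */
	unsigned long page_start = PFN_DOWN(off);
	unsigned long page_end = PFN_UP(off + size);

	/* every byte of [off, off + size) lies inside these pages */
	printf("bytes [%lu, %lu) -> pages [%lu, %lu)\n",
	       off, off + size, page_start, page_end);
	return 0;
}

With these inputs the sketch reports pages [1, 4): the range is widened outward so the whole byte span is covered, which is why the rounding directions differ at the two ends.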
Example #3
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap the pages covering the byte range
 * [@off, @off + @size) from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
    int page_start = PFN_DOWN(off);
    int page_end = PFN_UP(off + size);
    struct page **pages;
    unsigned long *populated;
    int rs, re;

    /* quick path, check whether it's empty already */
    rs = page_start;
    pcpu_next_unpop(chunk, &rs, &re, page_end);
    if (rs == page_start && re == page_end)
        return;

    /* immutable chunks can't be depopulated */
    WARN_ON(chunk->immutable);

    /*
     * If control reaches here, there must have been at least one
     * successful population attempt so the temp pages array must
     * be available now.
     */
    pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
    BUG_ON(!pages);

    /* unmap and free */
    pcpu_pre_unmap_flush(chunk, page_start, page_end);

    pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
        pcpu_unmap_pages(chunk, pages, populated, rs, re);

    /* no need to flush tlb, vmalloc will handle it lazily */

    pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
        pcpu_free_pages(chunk, pages, populated, rs, re);

    /* commit new bitmap */
    bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
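Note how Examples #1 and #3 never edit chunk->populated while unmapping: the helpers work against the separate populated bitmap returned by pcpu_get_pages_and_bitmap(), and only the final bitmap_copy() publishes the new state into the chunk ("commit new bitmap"). A simplified userspace model of that scratch-then-commit pattern follows; every name in it is illustrative rather than the kernel's.

/*
 * Scratch-then-commit: modify a private copy of the populated state,
 * then copy it back in one step, mirroring the bitmap_copy() above.
 */
#include <stdio.h>
#include <string.h>

#define UNIT_PAGES 8

struct chunk {
	unsigned char populated[UNIT_PAGES];	/* one byte per page, for simplicity */
};

static void depopulate_range(struct chunk *chunk, int page_start, int page_end)
{
	unsigned char scratch[UNIT_PAGES];
	int i;

	/* work on a scratch copy of the populated state */
	memcpy(scratch, chunk->populated, sizeof(scratch));

	for (i = page_start; i < page_end; i++)
		scratch[i] = 0;		/* the unmap/free passes clear these */

	/* commit new state (the bitmap_copy() step) */
	memcpy(chunk->populated, scratch, sizeof(chunk->populated));
}

int main(void)
{
	struct chunk c = { .populated = { 1, 1, 1, 1, 1, 1, 1, 1 } };
	int i;

	depopulate_range(&c, 2, 5);
	for (i = 0; i < UNIT_PAGES; i++)
		printf("page %d: %s\n", i, c.populated[i] ? "populated" : "empty");
	return 0;
}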