Example #1
/*
 * start_isolate_page_range() -- make the page allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 * @skip_hwpoisoned_pages: If true, hwpoisoned pages are ignored when
 * checking whether the range can be isolated.
 *
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, bool skip_hwpoisoned_pages)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page &&
		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}
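For context, the function is normally paired with undo_isolate_page_range() (shown in Examples #4 and #5 below). The following is a minimal sketch of such a caller, not code from the kernel tree: isolate_then_release() is a hypothetical name and MIGRATE_MOVABLE is just an illustrative choice of recovery migratetype.

/*
 * Hypothetical caller: isolate a pageblock-aligned PFN range, work on it
 * while nothing in it can be allocated, then release it again. Uses the
 * signature of the variant above.
 */
static int isolate_then_release(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, false);
	if (ret)
		return ret;	/* -EBUSY: part of the range is unmovable */

	/* ... migrate or offline the isolated pages here ... */

	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return 0;
}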
Example #2
/*
 * start_isolate_page_range() -- make the page allocation type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 *
 * Making the page allocation type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn));

	return -EBUSY;
}
Example #3
/**
 * try_claim_free_buddy_page() - Claim a given page from the buddy subsystem.
 *
 * This only works if the page is registered within the buddy system and
 * marked as free.
 */
int try_claim_free_buddy_page(struct page *requested_page,
        unsigned int allowed_sources, struct page **allocated_page,
        unsigned long *actual_source)
{
    int ret = CLAIMED_TRY_NEXT;

    if (allowed_sources & SOURCE_FREE_BUDDY_PAGE) {

        struct page *locked_page = NULL;
        unsigned long pfn = page_to_pfn(requested_page);
        unsigned int locked_page_count_after, locked_page_count_before;

        /*
         * Isolate the page, so that it doesn't get reallocated if it
         * was free.
         */
        set_migratetype_isolate(requested_page);
        locked_page_count_before = page_count(requested_page);
        if (page_count(compound_head(requested_page)) == 0) {
            if (is_free_buddy_page(requested_page)) {
                printk(KERN_DEBUG "try_claim_free_buddy_page: %#lx free buddy page\n", pfn);
                /* get, while page is still isolated */
                locked_page = claim_free_buddy_page(requested_page);
            } else {
                printk(KERN_DEBUG
                        "try_claim_free_buddy_page: %#lx: unknown zero refcount page type %lx\n",
                        pfn, requested_page->flags);
            }
        } else {
            unsigned long cppfn = page_to_pfn(compound_head(requested_page));

            /* Not a free page */
            printk(KERN_DEBUG
                    "try_claim_free_buddy_page: %#lx: %#lx refcount %i, page type %lx\n",
                    pfn, cppfn, page_count(compound_head(requested_page)),
                    requested_page->flags);
        }
        unset_migratetype_isolate(requested_page);

        if (locked_page) {
            /*
             * The page is now rightfully ours!
             */
            locked_page_count_after = page_count(locked_page);

            printk(KERN_DEBUG
                    "Buddy: Requested pfn %lx, allocated pfn %lx with pagecount %i (was:%i)\n",
                    page_to_pfn(requested_page), page_to_pfn(locked_page),
                    locked_page_count_after, locked_page_count_before);
            /* Hand the claimed page back to the caller. */
            *allocated_page = locked_page;
            *actual_source = SOURCE_FREE_BUDDY_PAGE;
            ret = CLAIMED_SUCCESSFULLY;
        }

    }
    return ret;
}
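For illustration, a caller might drive the helper as in the sketch below. The constants and the signature come from the snippet above; grab_page_at_pfn() and the pr_info() message are assumptions, and the sketch relies on the success path storing the claimed page through *allocated_page.

static struct page *grab_page_at_pfn(unsigned long pfn)
{
    struct page *claimed = NULL;
    unsigned long source;

    if (try_claim_free_buddy_page(pfn_to_page(pfn), SOURCE_FREE_BUDDY_PAGE,
            &claimed, &source) != CLAIMED_SUCCESSFULLY)
        return NULL;    /* page was not a free buddy page */

    pr_info("claimed pfn %#lx from source %lu\n",
            page_to_pfn(claimed), source);
    return claimed;
}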
Example #4
/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;
	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
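Example #5 below is the same function with the open-coded migratetype comparison replaced by the is_migrate_isolate_page() helper. Where the kernel provides that helper (under CONFIG_MEMORY_ISOLATION), it is roughly the following thin wrapper, so the two versions check the same condition:

static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}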
Example #5
/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
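Both start_isolate_page_range() and undo_isolate_page_range() BUG_ON() unaligned bounds, so a caller holding an arbitrary PFN range must first widen it to pageblock boundaries. A minimal sketch, assuming the four-argument start_isolate_page_range() from Example #1; isolate_arbitrary_range() is a hypothetical name, and round_down()/round_up() are the standard kernel helpers for power-of-two alignment:

static int isolate_arbitrary_range(unsigned long start_pfn, unsigned long end_pfn)
{
	/* Widen to the pageblocks touching [start_pfn, end_pfn). */
	unsigned long start = round_down(start_pfn, pageblock_nr_pages);
	unsigned long end = round_up(end_pfn, pageblock_nr_pages);
	int ret;

	ret = start_isolate_page_range(start, end, MIGRATE_MOVABLE, false);
	if (ret)
		return ret;

	/* ... */

	return undo_isolate_page_range(start, end, MIGRATE_MOVABLE);
}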