Example no. 1
0
/*
 * Release a single image page back to the page allocator, clearing its
 * PG_nosave flag first (and, if requested, its PG_nosave_free flag too).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page = virt_to_page(addr);

	ClearPageNosave(page);
	if (clear_nosave_free)
		ClearPageNosaveFree(page);
	free_page((unsigned long)addr);
}
Example no. 2
0
/*
 * Mark every page whose PFN appears in @bm as "unsafe" for the restore,
 * after first clearing the PG_nosave_free flag on all valid pages.
 *
 * Returns 0 on success, -EFAULT if the bitmap contains an invalid PFN.
 */
static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Reset the "nosave free" flag on every valid page in every zone. */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	while ((pfn = memory_bm_next_pfn(bm)) != BM_END_OF_MAP) {
		if (unlikely(!pfn_valid(pfn)))
			return -EFAULT;
		SetPageNosaveFree(pfn_to_page(pfn));
	}

	allocated_unsafe_pages = 0;

	return 0;
}
Example no. 3
0
/*
 * Walk every zone's buddy free lists and set the PG_nosave_free flag on
 * each page that is currently free, so the dump code can skip them.
 *
 * Returns the number of pages marked on success, or -1 if a corrupted
 * free-list entry or an invalid PFN is found; in that case the caller
 * should ignore DUMP_LEVEL and dump free pages as well.
 */
int diskdump_mark_free_pages(void)
{
	struct zone *zone;
	unsigned long start_pfn, err_pfn, i, pfn;
	int order, free_page_cnt = 0;
	struct list_head *curr, *previous, *dlhead;

	/*
	 * This is not necessary if PG_nosave_free is cleared
	 * while allocating new pages.
	 */
	for (pfn = next_ram_page(ULONG_MAX); pfn < max_pfn; pfn = next_ram_page(pfn))
		if (pfn_valid(pfn))
			ClearPageNosaveFree(pfn_to_page(pfn));

	for_each_zone(zone) {
		if (!zone->spanned_pages)
			continue;

		for (order = MAX_ORDER - 1; order >= 0; --order) {
			/*
			 * Emulate a list_for_each so that we can validate
			 * the prev link of each node as we walk.
			 */
			dlhead = &zone->free_area[order].free_list;

			for (previous = dlhead, curr = dlhead->next;
			     curr != dlhead;
			     previous = curr, curr = curr->next) {

				start_pfn = page_to_pfn(
					list_entry(curr, struct page, lru));

				/* A broken prev pointer means the list is corrupt. */
				if (!pfn_valid(start_pfn) ||
				    (previous != curr->prev)) {
					err_pfn = start_pfn;
					goto mark_err;
				}

				/* Mark all 2^order pages of this free block. */
				for (i = 0; i < (1 << order); i++) {
					pfn = start_pfn + i;
					if (!pfn_valid(pfn) ||
					    TestSetPageNosaveFree(
						  pfn_to_page(pfn))) {
						err_pfn = pfn;
						goto mark_err;
					}
				}
				free_page_cnt += i;
			}
		}
	}
	return free_page_cnt;

mark_err:
	/* Terminate each message with '\n' so they do not run together. */
	printk(KERN_WARNING "dump: Bad page. PFN %lu.\n", err_pfn);
	printk(KERN_WARNING "DUMP_LEVEL will be ignored. Free pages will be dumped.\n");
	return -1;
}
Example no. 4
0
/*
 * Free every page of a page backup entry list, clearing the suspend
 * page flags on each page before handing it back to the allocator.
 */
void free_pagedir(struct pbe *pblist)
{
	struct pbe *next;

	for (; pblist; pblist = next) {
		/* Save the link before freeing the page that holds it. */
		next = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
	}
}
Example no. 5
0
/*
 * Free all pages that carry both PG_nosave and PG_nosave_free,
 * clearing those flags as we go.
 */
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, last_pfn;

	for_each_zone(zone) {
		last_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < last_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			if (PageNosave(page) && PageNosaveFree(page)) {
				ClearPageNosave(page);
				ClearPageNosaveFree(page);
				free_page((long) page_address(page));
			}
		}
	}
}
Example no. 6
0
/*
 * Free every page marked with both PG_nosave and PG_nosave_free,
 * then reset the snapshot bookkeeping state.
 */
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, end_pfn;

	for_each_zone(zone) {
		end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			if (!PageNosave(page) || !PageNosaveFree(page))
				continue;
			ClearPageNosave(page);
			ClearPageNosaveFree(page);
			free_page((long) page_address(page));
		}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}
Example no. 7
0
/*
 * Relocate the suspend pagedir out of any memory region it would collide
 * with, by repeatedly allocating candidate blocks ("eating" the colliding
 * ones) until a non-colliding block is found, then freeing the eaten ones.
 *
 * Returns the result of check_pagedir() on success, or -ENOMEM if no
 * non-colliding block could be allocated.
 */
static int __init swsusp_pagedir_relocate(void)
{
	/*
	 * We have to avoid recursion (not to overflow kernel stack),
	 * and that's why code looks pretty cryptic 
	 */
	suspend_pagedir_t *old_pagedir = pagedir_nosave;
	void **eaten_memory = NULL;	/* head of an intrusive chain of eaten blocks */
	void **c = eaten_memory, *m, *f;
	int ret = 0;
	struct zone *zone;
	int i;
	struct pbe *p;
	unsigned long zone_pfn;

	printk("Relocating pagedir ");

	/* Set page flags */

	/*
	 * NOTE(review): unlike the other flag loops in this file, this one
	 * does not check pfn_valid() before pfn_to_page() — confirm that is
	 * safe for all zones on the supported configurations.
	 */
	for_each_zone(zone) {
        	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
                	SetPageNosaveFree(pfn_to_page(zone_pfn +
					zone->zone_start_pfn));
	}

	/* Clear orig address */

	/* Pages holding the image data must not be treated as free. */
	for(i = 0, p = pagedir_nosave; i < nr_copy_pages; i++, p++) {
		ClearPageNosaveFree(virt_to_page(p->orig_address));
	}

	/* Already in a safe place — nothing to relocate. */
	if (!does_collide_order((unsigned long)old_pagedir, pagedir_order)) {
		printk("not necessary\n");
		return check_pagedir();
	}

	/*
	 * Keep allocating blocks; colliding ones are threaded onto the
	 * eaten_memory chain (the first word of each block stores the
	 * previous chain head) so they can be freed once we are done.
	 */
	while ((m = (void *) __get_free_pages(GFP_ATOMIC, pagedir_order)) != NULL) {
		if (!does_collide_order((unsigned long)m, pagedir_order))
			break;
		eaten_memory = m;
		printk( "." ); 
		*eaten_memory = c;
		c = eaten_memory;
	}

	if (!m) {
		printk("out of memory\n");
		ret = -ENOMEM;
	} else {
		/* Copy the pagedir into the safe block and switch over. */
		pagedir_nosave =
			memcpy(m, old_pagedir, PAGE_SIZE << pagedir_order);
	}

	/* Release every block eaten while searching, walking the chain. */
	c = eaten_memory;
	while (c) {
		printk(":");
		f = c;
		c = *c;
		free_pages((unsigned long)f, pagedir_order);
	}
	if (ret)
		return ret;
	printk("|\n");
	return check_pagedir();
}