Example #1
struct page *___toi_get_nonconflicting_page(int can_be_highmem)
{
	struct page *page;
	gfp_t flags = TOI_ATOMIC_GFP;
	if (can_be_highmem)
		flags |= __GFP_HIGHMEM;
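
	/*
	 * When loading an alternate image, first try to reuse pages that
	 * the incoming image won't overwrite: pageset2 pages that aren't
	 * also in pageset1.
	 */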
	if (test_toi_state(TOI_LOADING_ALT_IMAGE) &&
			pageset2_map &&
			(ptoi_pfn != BM_END_OF_MAP)) {
		do {
			ptoi_pfn = memory_bm_next_pfn(pageset2_map);
			if (ptoi_pfn != BM_END_OF_MAP) {
				page = pfn_to_page(ptoi_pfn);
				if (!PagePageset1(page) &&
				    (can_be_highmem || !PageHighMem(page)))
					return page;
			}
		} while (ptoi_pfn != BM_END_OF_MAP);
	}

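	/*
	 * Fall back to allocating fresh pages until we get one that isn't
	 * part of pageset1. Conflicting allocations are chained through
	 * their own contents onto first_conflicting_page and freed later.
	 */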
	do {
		page = toi_alloc_page(29, flags);
		if (!page) {
			printk(KERN_INFO "Failed to get nonconflicting page.\n");
			return NULL;
		}
		if (PagePageset1(page)) {
			struct page **next = (struct page **) kmap(page);
			*next = first_conflicting_page;
			first_conflicting_page = page;
			kunmap(page);
		}
	} while (PagePageset1(page));

	return page;
}
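
Conflicting pages allocated above are chained through their own contents via
first_conflicting_page. A minimal sketch of the matching teardown,
free_conflicting_pages() (called in Example #3), assuming the same
toi__free_page() tag convention used above:

/*
 * Sketch (assumed implementation, not the original source): walk the
 * chain built in ___toi_get_nonconflicting_page() and release each
 * page. The next pointer lives in the page's own contents, so it must
 * be read out before the page is freed.
 */
static void free_conflicting_pages(void)
{
	while (first_conflicting_page) {
		struct page *next =
			*((struct page **) kmap(first_conflicting_page));
		kunmap(first_conflicting_page);
		toi__free_page(29, first_conflicting_page);
		first_conflicting_page = next;
	}
}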
Example #2
/*
 * toi_checksum_initialise
 *
 * Prepare to do some work by allocating buffers and transforms.
 * Returns: Int: Zero. Even if we can't set up checksumming, we still
 * want to hibernate.
 */
static int toi_checksum_initialise(int starting_cycle)
{
	int cpu;

	if (!(starting_cycle & SYSFS_HIBERNATE) || !toi_checksum_ops.enabled)
		return 0;

	if (!*toi_checksum_name) {
		printk(KERN_INFO "TuxOnIce: No checksum algorithm name set.\n");
		return 1;
	}

	for_each_online_cpu(cpu) {
		struct cpu_context *this = &per_cpu(contexts, cpu);
		struct page *page;

		this->transform = crypto_alloc_hash(toi_checksum_name, 0, 0);
		if (IS_ERR(this->transform)) {
			printk(KERN_INFO "TuxOnIce: Failed to initialise the "
				"%s checksum algorithm: %ld.\n",
				toi_checksum_name, PTR_ERR(this->transform));
			this->transform = NULL;
			return 1;
		}

		this->desc.tfm = this->transform;
		this->desc.flags = 0;

		page = toi_alloc_page(27, GFP_KERNEL);
		if (!page)
			return 1;
		this->buf = page_address(page);
		sg_init_one(&this->sg[0], this->buf, PAGE_SIZE);
	}
	return 0;
}
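
For context, the per-cpu state prepared above is consumed when individual
pages are checksummed. A hedged sketch of such a consumer, using the legacy
crypto_hash_digest() call that pairs with the crypto_alloc_hash() above; the
helper name toi_checksum_page and its signature are assumptions, not the
module's actual interface:

/*
 * Sketch (assumed helper): checksum one page of data with the per-cpu
 * context set up by toi_checksum_initialise(). sg[0] was already
 * initialised to describe this->buf, so we only stage the data and
 * run the digest.
 */
static int toi_checksum_page(void *virt, u8 *checksum_out)
{
	int cpu = get_cpu();
	struct cpu_context *ctx = &per_cpu(contexts, cpu);
	int ret;

	memcpy(ctx->buf, virt, PAGE_SIZE);
	ret = crypto_hash_digest(&ctx->desc, ctx->sg, PAGE_SIZE,
			checksum_out);
	put_cpu();
	return ret;
}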
Example #3
/**
 * toi_get_pageset1_load_addresses - generate pbes for conflicting pages
 *
 * We check here that the pagedir and the pages it points to won't
 * collide with the pages where we're going to restore the loaded
 * image later.
 *
 * Returns:
 *	Zero on success, -ENOMEM if we couldn't allocate enough
 *	non-conflicting pages (shouldn't happen).
 **/
int toi_get_pageset1_load_addresses(void)
{
	unsigned long pfn;
	int highallocd = 0, lowallocd = 0;
	int low_needed = pagedir1.size - get_highmem_size(pagedir1);
	int high_needed = get_highmem_size(pagedir1);
	int low_pages_for_highmem = 0;
	gfp_t flags = GFP_ATOMIC | __GFP_NOWARN | __GFP_HIGHMEM;
	struct page *page, *high_pbe_page = NULL, *last_high_pbe_page = NULL,
		    *low_pbe_page, *last_low_pbe_page = NULL;
	struct pbe **last_high_pbe_ptr = &restore_highmem_pblist,
		   *this_high_pbe = NULL;
	struct pbe **last_low_pbe_ptr, *this_low_pbe = NULL;
	unsigned long orig_low_pfn, orig_high_pfn;
	int high_pbes_done = 0, low_pbes_done = 0;
	int low_direct = 0, high_direct = 0, result = 0, i;
	int high_page = 1, high_offset = 0, low_page = 1, low_offset = 0;

	memory_bm_set_iterators(pageset1_map, 3);
	memory_bm_position_reset(pageset1_map);

	memory_bm_set_iterators(pageset1_copy_map, 2);
	memory_bm_position_reset(pageset1_copy_map);

	last_low_pbe_ptr = &restore_pblist;

	/* First, allocate pages for the start of our pbe lists. */
	if (high_needed) {
		high_pbe_page = ___toi_get_nonconflicting_page(1);
		if (!high_pbe_page) {
			result = -ENOMEM;
			goto out;
		}
		this_high_pbe = (struct pbe *) kmap(high_pbe_page);
		memset(this_high_pbe, 0, PAGE_SIZE);
	}

	low_pbe_page = ___toi_get_nonconflicting_page(0);
	if (!low_pbe_page) {
		result = -ENOMEM;
		goto out;
	}
	this_low_pbe = (struct pbe *) page_address(low_pbe_page);

	/*
	 * Next, allocate the number of pages we need.
	 */

	i = low_needed + high_needed;

	do {
		int is_high;

		if (i == low_needed)
			flags &= ~__GFP_HIGHMEM;

		page = toi_alloc_page(30, flags);
		BUG_ON(!page);

		SetPagePageset1Copy(page);
		is_high = PageHighMem(page);

		if (PagePageset1(page)) {
			if (is_high)
				high_direct++;
			else
				low_direct++;
		} else {
			if (is_high)
				highallocd++;
			else
				lowallocd++;
		}
	} while (--i);

	high_needed -= high_direct;
	low_needed -= low_direct;

	/*
	 * Do we need to use some lowmem pages for the copies of highmem
	 * pages?
	 */
	if (high_needed > highallocd) {
		low_pages_for_highmem = high_needed - highallocd;
		high_needed -= low_pages_for_highmem;
		low_needed += low_pages_for_highmem;
	}
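	/*
	 * Worked example (illustrative numbers): if, after subtracting
	 * the directly-reusable pages, we still need 100 highmem copies
	 * but only 80 highmem pages were allocated, 20 lowmem pages are
	 * earmarked for highmem data: low_pages_for_highmem = 20,
	 * high_needed drops to 80 and low_needed grows by 20.
	 */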

	/*
	 * Now generate our pbes (which will be used for the atomic restore),
	 * and free unneeded pages.
	 */
	memory_bm_position_reset(pageset1_copy_map);
	for (pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1)) {
		int is_high;
		page = pfn_to_page(pfn);
		is_high = PageHighMem(page);

		if (PagePageset1(page))
			continue;

		/* Not directly reused by pageset1, so use it for a copy. Add a pbe. */
		if (is_high || low_pages_for_highmem) {
			struct page *orig_page;
			high_pbes_done++;
			if (!is_high)
				low_pages_for_highmem--;
			do {
				orig_high_pfn = memory_bm_next_pfn_index(pageset1_map, 1);
				BUG_ON(orig_high_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_high_pfn);
			} while (!PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_high_pbe->orig_address = orig_page;
			this_high_pbe->address = page;
			this_high_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0, "High pbe %d/%d: %p(%lu)=>%p",
					high_page, high_offset, page, orig_high_pfn, orig_page);
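			/*
			 * The high pbe chain links struct page pointers
			 * rather than mapped addresses, so each pbe page
			 * can be kmapped as the chain is walked at
			 * restore time.
			 */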
			if (last_high_pbe_page != high_pbe_page) {
				*last_high_pbe_ptr =
					(struct pbe *) high_pbe_page;
				if (last_high_pbe_page) {
					kunmap(last_high_pbe_page);
					high_page++;
					high_offset = 0;
				} else
					high_offset++;
				last_high_pbe_page = high_pbe_page;
			} else {
				*last_high_pbe_ptr = this_high_pbe;
				high_offset++;
			}
			last_high_pbe_ptr = &this_high_pbe->next;
			this_high_pbe = get_next_pbe(&high_pbe_page,
					this_high_pbe, 1);
			if (IS_ERR(this_high_pbe)) {
				printk(KERN_INFO
					"Failed to get the next high pbe.\n");
				result = -ENOMEM;
				goto out;
			}
		} else {
			struct page *orig_page;
			low_pbes_done++;
			do {
				orig_low_pfn = memory_bm_next_pfn_index(pageset1_map, 2);
				BUG_ON(orig_low_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_low_pfn);
			} while (PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_low_pbe->orig_address = page_address(orig_page);
			this_low_pbe->address = page_address(page);
			this_low_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0, "Low pbe %d/%d: %p(%lu)=>%p",
					low_page, low_offset, this_low_pbe->orig_address,
					orig_low_pfn, this_low_pbe->address);
			*last_low_pbe_ptr = this_low_pbe;
			last_low_pbe_ptr = &this_low_pbe->next;
			this_low_pbe = get_next_pbe(&low_pbe_page,
					this_low_pbe, 0);
			if (low_pbe_page != last_low_pbe_page) {
				if (last_low_pbe_page) {
					low_page++;
					low_offset = 0;
				}
				last_low_pbe_page = low_pbe_page;
			} else
				low_offset++;
			if (IS_ERR(this_low_pbe)) {
				printk(KERN_INFO
					"Failed to get the next low pbe.\n");
				result = -ENOMEM;
				goto out;
			}
		}
	}

	if (high_pbe_page)
		kunmap(high_pbe_page);

	if (last_high_pbe_page != high_pbe_page) {
		if (last_high_pbe_page)
			kunmap(last_high_pbe_page);
		toi__free_page(29, high_pbe_page);
	}

	free_conflicting_pages();

out:
	memory_bm_set_iterators(pageset1_map, 1);
	memory_bm_set_iterators(pageset1_copy_map, 1);
	return result;
}
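
The loop above leans on get_next_pbe() to advance through pbe pages. A
minimal sketch of the behaviour the call sites imply (inferred, not the
original source): step to the next struct pbe within the current page, and
pull a fresh non-conflicting page, kmapped for highmem, when the next slot
would cross the page boundary.

/*
 * Sketch (inferred from the call sites above): return the next pbe
 * slot, allocating a new non-conflicting page when the current one
 * can't hold another pbe.
 */
static struct pbe *get_next_pbe(struct page **page_ptr, struct pbe *this_pbe,
		int highmem)
{
	if (((unsigned long) this_pbe & (PAGE_SIZE - 1)) +
			2 * sizeof(struct pbe) > PAGE_SIZE) {
		struct page *new_page = ___toi_get_nonconflicting_page(highmem);
		if (!new_page)
			return ERR_PTR(-ENOMEM);
		*page_ptr = new_page;
		this_pbe = highmem ? (struct pbe *) kmap(new_page) :
			(struct pbe *) page_address(new_page);
	} else
		this_pbe++;

	return this_pbe;
}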