Example #1
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_zone(zone) {
		unsigned long max_zone_pfn;

		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	do {
		pfn = memory_bm_next_pfn(orig_bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			struct page *page;
			void *src;

			page = pfn_to_page(pfn);
			src = page_address(page);
			page = pfn_to_page(memory_bm_next_pfn(copy_bm));
			copy_data_page(page_address(page), src);
		}
	} while (pfn != BM_END_OF_MAP);
}
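
The do/while above walks both bitmaps in lockstep, one memory_bm_next_pfn() call per copied page. A minimal sketch (not from the source) of the same walk written as a for loop, assuming both bitmaps carry the same number of set bits:

	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (pfn = memory_bm_next_pfn(orig_bm); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn(orig_bm)) {
		/* Source and destination pages are paired by iteration order. */
		void *src = page_address(pfn_to_page(pfn));
		struct page *dst = pfn_to_page(memory_bm_next_pfn(copy_bm));

		copy_data_page(page_address(dst), src);
	}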
Example #2
/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	source_index = memory_bm_next_pfn(pageset1_map);
	dest_index = memory_bm_next_pfn(pageset1_copy_map);

	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1,
		    was_present1, was_present2;

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		origvirt = PageHighMem(origpage) ?
			kmap_atomic(origpage) :
			page_address(origpage);

		copyvirt = PageHighMem(copypage) ?
			kmap_atomic(copypage) :
			page_address(copypage);

		was_present1 = kernel_page_present(origpage);
		if (!was_present1)
			kernel_map_pages(origpage, 1, 1);

		was_present2 = kernel_page_present(copypage);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present1)
			kernel_map_pages(origpage, 1, 0);

		if (!was_present2)
			kernel_map_pages(copypage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt);

		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt);

		source_index = memory_bm_next_pfn(pageset1_map);
		dest_index = memory_bm_next_pfn(pageset1_copy_map);
	}
}
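
The open-coded word copy above can be factored out. A hedged sketch, with the helper name toi_copy_page_manually being hypothetical; it copies PAGE_SIZE bytes one unsigned long at a time, avoiding copy_page() and whatever side effects (such as kernel_fpu_begin() raising the preempt count) it may have:

static void toi_copy_page_manually(unsigned long *copyvirt,
				   const unsigned long *origvirt)
{
	int loop;

	/* Copy the page word by word, back to front, as in the loop above. */
	for (loop = (PAGE_SIZE / sizeof(unsigned long)) - 1; loop >= 0; loop--)
		copyvirt[loop] = origvirt[loop];
}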
Example #3
int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This causes the buffer to be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (handle->prev == 0) {
			error = load_header(buffer);
			if (error)
				return error;

			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
			if (error)
				return error;

		} else if (handle->prev <= nr_meta_pages) {
			unpack_orig_pfns(buffer, &copy_bm);
			if (handle->prev == nr_meta_pages) {
				error = prepare_image(&orig_bm, &copy_bm);
				if (error)
					return error;

				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
				memory_bm_position_reset(&orig_bm);
				restore_pblist = NULL;
				handle->buffer = get_buffer(&orig_bm, &ca);
				handle->sync_read = 0;
				if (!handle->buffer)
					return -ENOMEM;
			}
		} else {
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
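
For context, a hedged sketch of how a caller typically drives this interface: ask snapshot_write_next() for the next buffer, then fill it from storage. The read_page() helper is hypothetical (0 on success):

	int ret, err = 0;

	while ((ret = snapshot_write_next(handle, PAGE_SIZE)) > 0) {
		err = read_page(handle->buffer);	/* hypothetical, 0 on success */
		if (err)
			break;
	}
	return err ? err : (ret < 0 ? ret : 0);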
Example #4
static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				SetPageNosaveFree(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}
Example #5
void check_checksums(void)
{
	unsigned long pfn;
	int index = 0, cpu = smp_processor_id();
	char current_checksum[CHECKSUM_SIZE];
	struct cpu_context *ctx = &per_cpu(contexts, cpu);

	if (!toi_checksum_ops.enabled) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksumming disabled.");
		return;
	}

	next_page = (unsigned long) page_list;

	toi_num_resaved = 0;
	this_checksum = 0;

	toi_message(TOI_IO, TOI_VERBOSE, 0, "Verifying checksums.");
	memory_bm_position_reset(pageset2_map);
	for (pfn = memory_bm_next_pfn(pageset2_map); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn(pageset2_map)) {
		int ret;
		char *pa;
		struct page *page = pfn_to_page(pfn);

		if (index % CHECKSUMS_PER_PAGE) {
			this_checksum += CHECKSUM_SIZE;
		} else {
			this_checksum = next_page + sizeof(void *);
			next_page = *((unsigned long *) next_page);
		}

		/* This is done with IRQs disabled, so the mapping must be atomic. */
		pa = kmap_atomic(page);
		memcpy(ctx->buf, pa, PAGE_SIZE);
		kunmap_atomic(pa);
		ret = crypto_hash_digest(&ctx->desc, ctx->sg, PAGE_SIZE,
							current_checksum);

		if (ret) {
			printk(KERN_INFO "Digest failed. Returned %d.\n", ret);
			return;
		}

		if (memcmp(current_checksum, (char *) this_checksum,
							CHECKSUM_SIZE)) {
			toi_message(TOI_IO, TOI_VERBOSE, 0, "Resaving %ld.",
					pfn);
			SetPageResave(pfn_to_page(pfn));
			toi_num_resaved++;
			if (test_action_state(TOI_ABORT_ON_RESAVE_NEEDED))
				set_abort_result(TOI_RESAVE_NEEDED);
		}

		index++;
	}
	toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksum verification complete.");
}
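
The pointer arithmetic on this_checksum and next_page implies a simple store layout: each checksum page begins with a pointer to the next page, followed by CHECKSUMS_PER_PAGE slots of CHECKSUM_SIZE bytes each. A hedged sketch of that layout as a struct (illustrative only; the code above works on raw addresses):

struct checksum_page {
	void *next;				/* first sizeof(void *) bytes */
	char sums[CHECKSUMS_PER_PAGE][CHECKSUM_SIZE];
};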
Example #6
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
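
A hedged usage sketch: clone the set bits of an existing bitmap into a freshly created one (bitmap names are illustrative):

	struct memory_bitmap tmp_bm;

	if (!memory_bm_create(&tmp_bm, GFP_KERNEL, PG_ANY))
		duplicate_memory_bitmap(&tmp_bm, &copy_bm);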
Example #7
int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This causes the buffer to be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_pfns(buffer, &orig_bm);
		} else {
			unsigned long pfn = memory_bm_next_pfn(&copy_bm);

			handle->buffer = page_address(pfn_to_page(pfn));
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
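
This is the mirror image of Example #3: the caller pulls pages out of the snapshot and writes them to storage. A hedged sketch, with write_page() hypothetical (0 on success):

	int ret, err = 0;

	while ((ret = snapshot_read_next(handle, PAGE_SIZE)) > 0) {
		err = write_page(handle->buffer);	/* hypothetical, 0 on success */
		if (err)
			break;
	}
	return err ? err : (ret < 0 ? ret : 0);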
Example #8
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	int chunk;
	int bit;

	do {
		bb = bm->cur.block;
		do {
			chunk = bm->cur.chunk;
			bit = bm->cur.bit;
			do {
				bit = next_bit_in_chunk(bit, bb->data + chunk);
				if (bit >= 0)
					goto Return_pfn;

				chunk = next_chunk_in_block(chunk, bb);
				bit = -1;
			} while (chunk >= 0);
			bb = bb->next;
			bm->cur.block = bb;
			memory_bm_reset_chunk(bm);
		} while (bb);
		zone_bm = bm->cur.zone_bm->next;
		if (zone_bm) {
			bm->cur.zone_bm = zone_bm;
			bm->cur.block = zone_bm->bm_blocks;
			memory_bm_reset_chunk(bm);
		}
	} while (zone_bm);
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.chunk = chunk;
	bm->cur.bit = bit;
	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}
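
The Return_pfn computation is the inverse of the bitmap's layout: the iterator position is a (block, chunk, bit) triple, and the pfn is recovered from the block's base. A minimal sketch of that mapping as a helper (the name is hypothetical):

static inline unsigned long bm_position_to_pfn(struct bm_block *bb,
					       int chunk, int bit)
{
	/* Same arithmetic as the Return_pfn label above. */
	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}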
Example #9
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct zone *zone;
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	unsigned int nr;

	chain_init(&ca, gfp_mask, safe_needed);

	/* Compute the number of zones */
	nr = 0;
	for_each_zone(zone)
		if (populated_zone(zone))
			nr++;

	/* Allocate the list of zones bitmap objects */
	zone_bm = create_zone_bm_list(nr, &ca);
	bm->zone_bm_list = zone_bm;
	if (!zone_bm) {
		chain_free(&ca, PG_UNSAFE_CLEAR);
		return -ENOMEM;
	}

	/* Initialize the zone bitmap objects */
	for_each_zone(zone) {
		unsigned long pfn;

		if (!populated_zone(zone))
			continue;

		zone_bm->start_pfn = zone->zone_start_pfn;
		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		/* Allocate the list of bitmap block objects */
		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
		bb = create_bm_block_list(nr, &ca);
		zone_bm->bm_blocks = bb;
		zone_bm->cur_block = bb;
		if (!bb)
			goto Free;

		nr = zone->spanned_pages;
		pfn = zone->zone_start_pfn;
		/* Initialize the bitmap block objects */
		while (bb) {
			unsigned long *ptr;

			ptr = get_image_page(gfp_mask, safe_needed);
			bb->data = ptr;
			if (!ptr)
				goto Free;

			bb->start_pfn = pfn;
			if (nr >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				bb->size = BM_CHUNKS_PER_BLOCK;
				nr -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += nr;
				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
			}
			bb->end_pfn = pfn;
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
	return 0;

 Free:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	return -ENOMEM;
}
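
A hedged end-to-end usage sketch for the bitmap API shown here: create, mark a pfn, iterate, free (some_pfn is illustrative; error handling trimmed):

	struct memory_bitmap bm;
	unsigned long pfn;

	if (memory_bm_create(&bm, GFP_KERNEL, PG_ANY))
		return -ENOMEM;
	memory_bm_set_bit(&bm, some_pfn);
	memory_bm_position_reset(&bm);
	while ((pfn = memory_bm_next_pfn(&bm)) != BM_END_OF_MAP)
		pr_debug("pfn %lu is set\n", pfn);
	memory_bm_free(&bm, PG_UNSAFE_CLEAR);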
Example #10
void toi_reset_alt_image_pageset2_pfn(void)
{
	memory_bm_position_reset(pageset2_map);
}
Example #11
/**
 * get_pageset1_load_addresses - generate pbes for conflicting pages
 *
 * We check here that the pagedir and the pages it points to won't collide
 * with the pages where we're going to restore the loaded pages later.
 *
 * Returns:
 *	Zero on success, nonzero if we couldn't find enough pages (this
 *	shouldn't happen).
 **/
int toi_get_pageset1_load_addresses(void)
{
	unsigned long pfn;
	int highallocd = 0, lowallocd = 0;
	int low_needed = pagedir1.size - get_highmem_size(pagedir1);
	int high_needed = get_highmem_size(pagedir1);
	int low_pages_for_highmem = 0;
	gfp_t flags = GFP_ATOMIC | __GFP_NOWARN | __GFP_HIGHMEM;
	struct page *page, *high_pbe_page = NULL, *last_high_pbe_page = NULL,
		    *low_pbe_page, *last_low_pbe_page = NULL;
	struct pbe **last_high_pbe_ptr = &restore_highmem_pblist,
		   *this_high_pbe = NULL;
	unsigned long orig_low_pfn, orig_high_pfn;
	int high_pbes_done = 0, low_pbes_done = 0;
	int low_direct = 0, high_direct = 0, result = 0, i;
	int high_page = 1, high_offset = 0, low_page = 1, low_offset = 0;

	memory_bm_set_iterators(pageset1_map, 3);
	memory_bm_position_reset(pageset1_map);

	memory_bm_set_iterators(pageset1_copy_map, 2);
	memory_bm_position_reset(pageset1_copy_map);

	last_low_pbe_ptr = &restore_pblist;

	/* First, allocate pages for the start of our pbe lists. */
	if (high_needed) {
		high_pbe_page = ___toi_get_nonconflicting_page(1);
		if (!high_pbe_page) {
			result = -ENOMEM;
			goto out;
		}
		this_high_pbe = (struct pbe *) kmap(high_pbe_page);
		memset(this_high_pbe, 0, PAGE_SIZE);
	}

	low_pbe_page = ___toi_get_nonconflicting_page(0);
	if (!low_pbe_page) {
		result = -ENOMEM;
		goto out;
	}
	this_low_pbe = (struct pbe *) page_address(low_pbe_page);

	/*
	 * Next, allocate the number of pages we need.
	 */

	i = low_needed + high_needed;

	do {
		int is_high;

		if (i == low_needed)
			flags &= ~__GFP_HIGHMEM;

		page = toi_alloc_page(30, flags);
		BUG_ON(!page);

		SetPagePageset1Copy(page);
		is_high = PageHighMem(page);

		if (PagePageset1(page)) {
			if (is_high)
				high_direct++;
			else
				low_direct++;
		} else {
			if (is_high)
				highallocd++;
			else
				lowallocd++;
		}
	} while (--i);

	high_needed -= high_direct;
	low_needed -= low_direct;

	/*
	 * Do we need to use some lowmem pages for the copies of highmem
	 * pages?
	 */
	if (high_needed > highallocd) {
		low_pages_for_highmem = high_needed - highallocd;
		high_needed -= low_pages_for_highmem;
		low_needed += low_pages_for_highmem;
	}

	/*
	 * Now generate our pbes (which will be used for the atomic restore),
	 * and free unneeded pages.
	 */
	memory_bm_position_reset(pageset1_copy_map);
	for (pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1)) {
		int is_high;
		page = pfn_to_page(pfn);
		is_high = PageHighMem(page);

		if (PagePageset1(page))
			continue;

		/* Nope. We're going to use this page. Add a pbe. */
		if (is_high || low_pages_for_highmem) {
			struct page *orig_page;
			high_pbes_done++;
			if (!is_high)
				low_pages_for_highmem--;
			do {
				orig_high_pfn = memory_bm_next_pfn_index(pageset1_map, 1);
				BUG_ON(orig_high_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_high_pfn);
			} while (!PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_high_pbe->orig_address = orig_page;
			this_high_pbe->address = page;
			this_high_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0, "High pbe %d/%d: %p(%lu)=>%p",
					high_page, high_offset, page, orig_high_pfn, orig_page);
			if (last_high_pbe_page != high_pbe_page) {
				*last_high_pbe_ptr =
					(struct pbe *) high_pbe_page;
				if (last_high_pbe_page) {
					kunmap(last_high_pbe_page);
					high_page++;
					high_offset = 0;
				} else
					high_offset++;
				last_high_pbe_page = high_pbe_page;
			} else {
				*last_high_pbe_ptr = this_high_pbe;
				high_offset++;
			}
			last_high_pbe_ptr = &this_high_pbe->next;
			this_high_pbe = get_next_pbe(&high_pbe_page,
					this_high_pbe, 1);
			if (IS_ERR(this_high_pbe)) {
				printk(KERN_INFO
						"This high pbe is an error.\n");
				return -ENOMEM;
			}
		} else {
			struct page *orig_page;
			low_pbes_done++;
			do {
				orig_low_pfn = memory_bm_next_pfn_index(pageset1_map, 2);
				BUG_ON(orig_low_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_low_pfn);
			} while (PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_low_pbe->orig_address = page_address(orig_page);
			this_low_pbe->address = page_address(page);
			this_low_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0, "Low pbe %d/%d: %p(%lu)=>%p",
					low_page, low_offset, this_low_pbe->orig_address,
					orig_low_pfn, this_low_pbe->address);
			*last_low_pbe_ptr = this_low_pbe;
			last_low_pbe_ptr = &this_low_pbe->next;
			this_low_pbe = get_next_pbe(&low_pbe_page,
					this_low_pbe, 0);
			if (low_pbe_page != last_low_pbe_page) {
				if (last_low_pbe_page) {
					low_page++;
					low_offset = 0;
				}
				last_low_pbe_page = low_pbe_page;
			} else
				low_offset++;
			if (IS_ERR(this_low_pbe)) {
				printk(KERN_INFO "this_low_pbe is an error.\n");
				return -ENOMEM;
			}
		}
	}

	if (high_pbe_page)
		kunmap(high_pbe_page);

	if (last_high_pbe_page != high_pbe_page) {
		if (last_high_pbe_page)
			kunmap(last_high_pbe_page);
		toi__free_page(29, high_pbe_page);
	}

	free_conflicting_pages();

out:
	memory_bm_set_iterators(pageset1_map, 1);
	memory_bm_set_iterators(pageset1_copy_map, 1);
	return result;
}
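
For orientation, a hedged sketch of how the low pbe list built above is consumed at atomic restore time: each pbe pairs the page the data was loaded into with its original destination (simplified; the real restore handles highmem pbes separately):

	struct pbe *p;

	for (p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);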
/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	source_index = memory_bm_next_pfn(pageset1_map);
	dest_index = memory_bm_next_pfn(pageset1_copy_map);

	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1, was_present1, was_present2;

#ifdef CONFIG_TOI_ENHANCE
		if (!pfn_valid(source_index) || !pfn_valid(dest_index)) {
			pr_emerg("[%s] (%d) dest_index:%lu, source_index:%lu\n", __func__, i,
				 dest_index, source_index);
			set_abort_result(TOI_ARCH_PREPARE_FAILED);
			return;
		}
#endif

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		origvirt = PageHighMem(origpage) ? kmap_atomic(origpage) : page_address(origpage);

		copyvirt = PageHighMem(copypage) ? kmap_atomic(copypage) : page_address(copypage);

		was_present1 = kernel_page_present(origpage);
		if (!was_present1)
			kernel_map_pages(origpage, 1, 1);

		was_present2 = kernel_page_present(copypage);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present1)
			kernel_map_pages(origpage, 1, 0);

		if (!was_present2)
			kernel_map_pages(copypage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt);

		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt);

		source_index = memory_bm_next_pfn(pageset1_map);
		dest_index = memory_bm_next_pfn(pageset1_copy_map);
	}
}