Example #1
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_zone (zone) {
		unsigned long max_zone_pfn;

		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	do {
		pfn = memory_bm_next_pfn(orig_bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			struct page *page;
			void *src;

			page = pfn_to_page(pfn);
			src = page_address(page);
			page = pfn_to_page(memory_bm_next_pfn(copy_bm));
			copy_data_page(page_address(page), src);
		}
	} while (pfn != BM_END_OF_MAP);
}
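
All of the examples on this page revolve around the same iteration idiom: reset the bitmap's internal position, then call memory_bm_next_pfn() until it returns BM_END_OF_MAP. As a minimal sketch of the for-loop form of that idiom (count_set_pfns is a hypothetical helper, not kernel code):

static unsigned long count_set_pfns(struct memory_bitmap *bm)
{
	unsigned long pfn, count = 0;

	/* Always rewind first; memory_bm_next_pfn() is stateful. */
	memory_bm_position_reset(bm);
	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn(bm))
		count++;

	return count;
}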
Example #2
void check_checksums(void)
{
	unsigned long pfn;
	int index = 0, cpu = smp_processor_id();
	char current_checksum[CHECKSUM_SIZE];
	struct cpu_context *ctx = &per_cpu(contexts, cpu);

	if (!toi_checksum_ops.enabled) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksumming disabled.");
		return;
	}

	next_page = (unsigned long) page_list;

	toi_num_resaved = 0;
	this_checksum = 0;

	toi_message(TOI_IO, TOI_VERBOSE, 0, "Verifying checksums.");
	memory_bm_position_reset(pageset2_map);
	for (pfn = memory_bm_next_pfn(pageset2_map); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn(pageset2_map)) {
		int ret;
		char *pa;
		struct page *page = pfn_to_page(pfn);

		if (index % CHECKSUMS_PER_PAGE) {
			this_checksum += CHECKSUM_SIZE;
		} else {
			this_checksum = next_page + sizeof(void *);
			next_page = *((unsigned long *) next_page);
		}

		/* Done when IRQs disabled so must be atomic */
		pa = kmap_atomic(page);
		memcpy(ctx->buf, pa, PAGE_SIZE);
		kunmap_atomic(pa);
		ret = crypto_hash_digest(&ctx->desc, ctx->sg, PAGE_SIZE,
							current_checksum);

		if (ret) {
			printk(KERN_INFO "Digest failed. Returned %d.\n", ret);
			return;
		}

		if (memcmp(current_checksum, (char *) this_checksum,
							CHECKSUM_SIZE)) {
			toi_message(TOI_IO, TOI_VERBOSE, 0, "Resaving %ld.",
					pfn);
			SetPageResave(pfn_to_page(pfn));
			toi_num_resaved++;
			if (test_action_state(TOI_ABORT_ON_RESAVE_NEEDED))
				set_abort_result(TOI_RESAVE_NEEDED);
		}

		index++;
	}
	toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksum verification complete.");
}
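
The this_checksum/next_page arithmetic above implies a simple layout: each checksum page begins with a pointer to the next page in the chain, followed by CHECKSUMS_PER_PAGE checksums of CHECKSUM_SIZE bytes each. A hypothetical helper (not part of TuxOnIce) that locates the n-th stored checksum under that assumption:

static char *nth_checksum(unsigned long first_page, int n)
{
	unsigned long page = first_page;
	int i;

	/* Follow the next-page pointer stored in the first word of each page. */
	for (i = 0; i < n / CHECKSUMS_PER_PAGE; i++)
		page = *((unsigned long *) page);

	/* Skip the next-page pointer, then index into the checksum array. */
	return (char *) (page + sizeof(void *) +
			(n % CHECKSUMS_PER_PAGE) * CHECKSUM_SIZE);
}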
Example #3
/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	source_index = memory_bm_next_pfn(pageset1_map);
	dest_index = memory_bm_next_pfn(pageset1_copy_map);

	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1,
		    was_present1, was_present2;

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		origvirt = PageHighMem(origpage) ?
			kmap_atomic(origpage) :
			page_address(origpage);

		copyvirt = PageHighMem(copypage) ?
			kmap_atomic(copypage) :
			page_address(copypage);

		was_present1 = kernel_page_present(origpage);
		if (!was_present1)
			kernel_map_pages(origpage, 1, 1);

		was_present2 = kernel_page_present(copypage);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present1)
			kernel_map_pages(origpage, 1, 0);

		if (!was_present2)
			kernel_map_pages(copypage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt);

		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt);

		source_index = memory_bm_next_pfn(pageset1_map);
		dest_index = memory_bm_next_pfn(pageset1_copy_map);
	}
}
Example #4
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
Example #5
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));

	if (PageNosave(page) && PageNosaveFree(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_address = (unsigned long)page_address(page);
	pbe->address = (unsigned long)safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return (void *)pbe->address;
}
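
A caller would typically copy a page it has just read from the image into whatever frame get_buffer() selected, whether that is the original page frame or a "safe" bounce page tracked through a struct pbe. A hypothetical consumer sketch (store_loaded_page is illustrative, not kernel code):

static int store_loaded_page(struct memory_bitmap *bm,
			struct chain_allocator *ca, const void *data)
{
	void *dst = get_buffer(bm, ca);

	if (!dst)
		return -ENOMEM;

	/* dst points at either the original frame or a safe bounce page. */
	memcpy(dst, data, PAGE_SIZE);
	return 0;
}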
Example #6
static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				SetPageNosaveFree(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}
Example #7
static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
	}
}
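
On the resume side the same buffer format has to be read back into a bitmap. The sketch below shows that inverse, assuming memory_bm_set_bit() (used in Examples #1 and #4) is available; the function name is illustrative rather than the kernel's own:

static inline void
unpack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		memory_bm_set_bit(bm, buf[j]);
	}
}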
Example #8
int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_pfns(buffer, &orig_bm);
		} else {
			unsigned long pfn = memory_bm_next_pfn(&copy_bm);

			handle->buffer = page_address(pfn_to_page(pfn));
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
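
Reading out the whole image then amounts to calling snapshot_read_next() until it returns 0 (image exhausted) or a negative error. A minimal hypothetical consumer loop:

static int drain_snapshot(struct snapshot_handle *handle)
{
	int ret;

	do {
		ret = snapshot_read_next(handle, PAGE_SIZE);
		if (ret > 0) {
			/* Consume ret bytes from handle->buffer here, e.g.
			 * queue them for writing to the image device.
			 */
		}
	} while (ret > 0);

	return ret;	/* 0 once the image is exhausted, negative on error */
}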
Example #9
struct page *___toi_get_nonconflicting_page(int can_be_highmem)
{
	struct page *page;
	gfp_t flags = TOI_ATOMIC_GFP;
	if (can_be_highmem)
		flags |= __GFP_HIGHMEM;

	if (test_toi_state(TOI_LOADING_ALT_IMAGE) &&
			pageset2_map &&
			(ptoi_pfn != BM_END_OF_MAP)) {
		do {
			ptoi_pfn = memory_bm_next_pfn(pageset2_map);
			if (ptoi_pfn != BM_END_OF_MAP) {
				page = pfn_to_page(ptoi_pfn);
				if (!PagePageset1(page) &&
				    (can_be_highmem || !PageHighMem(page)))
					return page;
			}
		} while (ptoi_pfn != BM_END_OF_MAP);
	}

	do {
		page = toi_alloc_page(29, flags);
		if (!page) {
			printk(KERN_INFO "Failed to get nonconflicting "
					"page.\n");
			return NULL;
		}
		if (PagePageset1(page)) {
			struct page **next = (struct page **) kmap(page);
			*next = first_conflicting_page;
			first_conflicting_page = page;
			kunmap(page);
		}
	} while (PagePageset1(page));

	return page;
}
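
Pages that collided with pageset1 were chained through their own first word via kmap() above, so they can be released once a non-conflicting page has been found. A hypothetical cleanup walk (the real TuxOnIce code presumably frees them through its own allocator; plain __free_page() is used here purely for illustration):

static void free_conflicting_pages(void)
{
	while (first_conflicting_page) {
		/* The first word of each chained page holds the next pointer. */
		struct page *next =
			*((struct page **) kmap(first_conflicting_page));

		kunmap(first_conflicting_page);
		__free_page(first_conflicting_page);
		first_conflicting_page = next;
	}
}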
/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	source_index = memory_bm_next_pfn(pageset1_map);
	dest_index = memory_bm_next_pfn(pageset1_copy_map);

	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1, was_present1, was_present2;

#ifdef CONFIG_TOI_ENHANCE
		if (!pfn_valid(source_index) || !pfn_valid(dest_index)) {
			pr_emerg("[%s] (%d) dest_index:%lu, source_index:%lu\n", __func__, i,
				 dest_index, source_index);
			set_abort_result(TOI_ARCH_PREPARE_FAILED);
			return;
		}
#endif

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		origvirt = PageHighMem(origpage) ? kmap_atomic(origpage) : page_address(origpage);

		copyvirt = PageHighMem(copypage) ? kmap_atomic(copypage) : page_address(copypage);

		was_present1 = kernel_page_present(origpage);
		if (!was_present1)
			kernel_map_pages(origpage, 1, 1);

		was_present2 = kernel_page_present(copypage);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present1)
			kernel_map_pages(origpage, 1, 0);

		if (!was_present2)
			kernel_map_pages(copypage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt);

		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt);

		source_index = memory_bm_next_pfn(pageset1_map);
		dest_index = memory_bm_next_pfn(pageset1_copy_map);
	}
}