/*
 * check_checksums - verify pageset2 page contents against stored checksums.
 *
 * Walks every pfn in pageset2_map, recomputes each page's digest using the
 * per-cpu crypto context and compares it against the checksum stored in the
 * chain of checksum pages rooted at page_list.  Pages whose contents changed
 * since they were checksummed are flagged for resaving (SetPageResave) and
 * counted in toi_num_resaved; if TOI_ABORT_ON_RESAVE_NEEDED is set, the
 * cycle is aborted instead.  Returns early when checksumming is disabled or
 * if a digest operation fails.
 */
void check_checksums(void)
{
	int pfn, index = 0, cpu = smp_processor_id();
	char current_checksum[CHECKSUM_SIZE];
	struct cpu_context *ctx = &per_cpu(contexts, cpu);

	if (!toi_checksum_ops.enabled) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksumming disabled.");
		return;
	}

	/* Head of the linked chain of pages holding the saved checksums. */
	next_page = (unsigned long) page_list;

	toi_num_resaved = 0;
	this_checksum = 0;

	toi_message(TOI_IO, TOI_VERBOSE, 0, "Verifying checksums.");
	memory_bm_position_reset(pageset2_map);
	for (pfn = memory_bm_next_pfn(pageset2_map); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn(pageset2_map)) {
		int ret;
		char *pa;
		struct page *page = pfn_to_page(pfn);

		/*
		 * CHECKSUMS_PER_PAGE checksums are packed into each chain
		 * page after a leading link word.  Step within the current
		 * page, or follow the link word to the next chain page.
		 */
		if (index % CHECKSUMS_PER_PAGE) {
			this_checksum += CHECKSUM_SIZE;
		} else {
			this_checksum = next_page + sizeof(void *);
			next_page = *((unsigned long *) next_page);
		}

		/* Done when IRQs disabled so must be atomic */
		pa = kmap_atomic(page);
		memcpy(ctx->buf, pa, PAGE_SIZE);
		kunmap_atomic(pa);
		ret = crypto_hash_digest(&ctx->desc, ctx->sg, PAGE_SIZE,
							current_checksum);

		if (ret) {
			printk(KERN_INFO "Digest failed. Returned %d.\n", ret);
			return;
		}

		/* Mismatch: page changed after checksumming; mark for resave. */
		if (memcmp(current_checksum, (char *) this_checksum,
							CHECKSUM_SIZE)) {
			toi_message(TOI_IO, TOI_VERBOSE, 0, "Resaving %ld.",
					pfn);
			SetPageResave(pfn_to_page(pfn));
			toi_num_resaved++;
			if (test_action_state(TOI_ABORT_ON_RESAVE_NEEDED))
				set_abort_result(TOI_RESAVE_NEEDED);
		}

		index++;
	}
	toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksum verification complete.");
}
/*
 * forget_signature_page - release both cached signature pages.
 *
 * Frees the "current" and "original" signature pages, if allocated, and
 * clears their global pointers.  toi_sig_data aliases toi_cur_sig_page,
 * so it is reset first to prevent any stale dereference.
 */
void forget_signature_page(void)
{
	if (toi_cur_sig_page != NULL) {
		toi_sig_data = NULL;
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Freeing toi_cur_sig_page (%p).",
			    toi_cur_sig_page);
		toi_free_page(38, (unsigned long)toi_cur_sig_page);
		toi_cur_sig_page = NULL;
	}

	if (toi_orig_sig_page != NULL) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Freeing toi_orig_sig_page (%p).",
			    toi_orig_sig_page);
		toi_free_page(38, (unsigned long)toi_orig_sig_page);
		toi_orig_sig_page = NULL;
	}
}
int toi_bio_mark_have_image(void)
{
    int result = 0;
    char buf[32];
    struct fs_info *fs_info;

    toi_message(TOI_IO, TOI_VERBOSE, 0, "Recording that an image exists.");
    memcpy(toi_sig_data->sig, tuxonice_signature,
           sizeof(tuxonice_signature));
    toi_sig_data->have_image = 1;
    toi_sig_data->resumed_before = 0;
    toi_sig_data->header_dev_t = get_header_dev_t();
    toi_sig_data->have_uuid = 0;

#if 1 // JJ: check this uuid is mismatch, such that it will failed on toi_bio_read_header_init() !!!
    fs_info = fs_info_from_block_dev(get_header_bdev());
    if (fs_info && !IS_ERR(fs_info)) {
        memcpy(toi_sig_data->header_uuid, &fs_info->uuid, 16);
        free_fs_info(fs_info);
    } else
        result = (int) PTR_ERR(fs_info);
#endif

    if (!result) {
        toi_message(TOI_IO, TOI_VERBOSE, 0, "Got uuid for dev_t %s.",
                    format_dev_t(buf, get_header_dev_t()));
        toi_sig_data->have_uuid = 1;
    } else
        toi_message(TOI_IO, TOI_VERBOSE, 0, "Could not get uuid for "
                    "dev_t %s.",
                    format_dev_t(buf, get_header_dev_t()));

    toi_sig_data->first_header_block = get_headerblock();
    have_image = 1;
    toi_message(TOI_IO, TOI_VERBOSE, 0, "header dev_t is %x. First block "
                "is %d.", toi_sig_data->header_dev_t,
                toi_sig_data->first_header_block);

    memcpy(toi_sig_data->sig2, tuxonice_signature,
           sizeof(tuxonice_signature));
    toi_sig_data->header_version = TOI_HEADER_VERSION;

    return toi_bio_ops.bdev_page_io(WRITE, resume_block_device,
                                    resume_firstblock, virt_to_page(toi_cur_sig_page));
}
int get_signature_page(void)
{
	if (!toi_cur_sig_page) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Allocating current signature page.");
		toi_cur_sig_page = (char *)toi_get_zeroed_page(38, TOI_ATOMIC_GFP);
		if (!toi_cur_sig_page) {
			pr_err("Failed to allocate memory for the current image signature.\n");
			return -ENOMEM;
		}

		toi_sig_data = (struct sig_data *)toi_cur_sig_page;
	}

	toi_message(TOI_IO, TOI_VERBOSE, 0, "Reading signature from dev %x, sector %lu.",
		    (unsigned int) resume_block_device->bd_dev, resume_firstblock);

	return toi_bio_ops.bdev_page_io(READ, resume_block_device,
					resume_firstblock, virt_to_page(toi_cur_sig_page));
}
/*
 * We need to ensure we use the signature page that's currently on disk,
 * so as to not remove the image header. Post-atomic-restore, the orig sig
 * page will be empty, so we can use that as our method of knowing that we
 * need to load the on-disk signature and not use the non-image sig in
 * memory. (We're going to powerdown after writing the change, so it's safe.)
 */
/*
 * toi_bio_mark_resume_attempted - persist the "resume attempted" flag.
 *
 * @flag: value to store in the signature page's resumed_before field.
 *
 * Pre atomic-restore (toi_orig_sig_page not yet loaded), reload the
 * signature page from disk first so the on-disk image header is not
 * clobbered.  Returns 0 on success or a negative error.
 */
int toi_bio_mark_resume_attempted(int flag)
{
	toi_message(TOI_IO, TOI_VERBOSE, 0, "Make resume attempted = %d.", flag);
	if (!toi_orig_sig_page) {
		int result;

		forget_signature_page();
		result = get_signature_page();
		/*
		 * If reloading failed, toi_sig_data is NULL (cleared by
		 * forget_signature_page()); bail out rather than oops.
		 */
		if (result)
			return result;
	}
	toi_sig_data->resumed_before = flag;
	return toi_bio_ops.bdev_page_io(WRITE, resume_block_device,
					resume_firstblock, virt_to_page(toi_cur_sig_page));
}
/**
 * toi_open_bdev: Open a bdev at resume time.
 *
 * index: The swap index. May be MAX_SWAPFILES for the resume_dev_t
 * (the user can have resume= pointing at a swap partition/file that isn't
 * swapon'd when they hibernate), or MAX_SWAPFILES+1 for the first page of the
 * header. It will be from a swap partition that was enabled when we hibernated,
 * but we don't know its real index until we read that first page.
 * dev_t: The device major/minor.
 * display_errs: Whether to try to do this quietly.
 *
 * We stored a dev_t in the image header. Open the matching device without
 * requiring /dev/<whatever> in most cases and record the details needed
 * to close it later and avoid duplicating work.
 */
struct block_device *toi_open_bdev(char *uuid, dev_t default_device, int display_errs)
{
	struct block_device *bdev;
	dev_t device = default_device;
	char buf[32];
	int retried = 0;

 retry:
	if (uuid) {
		struct fs_info seek;

		/*
		 * The uuid is 16 bytes of binary data that may contain
		 * embedded zero bytes, so it must be copied with memcpy;
		 * strncpy would stop at the first zero byte and zero-fill
		 * the remainder, corrupting the lookup key.
		 */
		memcpy(&seek.uuid, uuid, 16);
		seek.dev_t = 0;
		seek.last_mount_size = 0;
		device = blk_lookup_fs_info(&seek);
		if (!device) {
			device = default_device;
			printk(KERN_DEBUG "Unable to resolve uuid. Falling back" " to dev_t.\n");
		} else
			printk(KERN_DEBUG "Resolved uuid to device %s.\n",
			       format_dev_t(buf, device));
	}

	if (!device) {
		printk(KERN_ERR "TuxOnIce attempting to open a " "blank dev_t!\n");
		dump_stack();
		return NULL;
	}
	bdev = toi_open_by_devnum(device);

	if (IS_ERR(bdev) || !bdev) {
		/* Devices may still be probing; wait once and retry. */
		if (!retried) {
			retried = 1;
			wait_for_device_probe();
			goto retry;
		}
		if (display_errs)
			toi_early_boot_message(1, TOI_CONTINUE_REQ,
					       "Failed to get access to block device "
					       "\"%x\" (error %d).\n Maybe you need "
					       "to run mknod and/or lvmsetup in an "
					       "initrd/ramfs?", device,
					       /*
						* %d needs the decoded error, not the
						* raw pointer (bdev may also be NULL).
						*/
					       bdev ? (int) PTR_ERR(bdev) : -ENODEV);
		return ERR_PTR(-EINVAL);
	}
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "TuxOnIce got bdev %p for dev_t %x.", bdev, device);

	return bdev;
}
/*
 * toi_bio_restore_original_signature - restore the original signature
 *
 * At boot time (aborting pre atomic-restore), toi_orig_sig_page gets used.
 * It will have the original signature page contents, stored in the image
 * header. Post atomic-restore, we use :toi_cur_sig_page, which will contain
 * the contents that were loaded when we started the cycle.
 */
int toi_bio_restore_original_signature(void)
{
	char *use = toi_orig_sig_page ? toi_orig_sig_page : toi_cur_sig_page;

	if (have_old_image)
		return remove_old_signature();

	if (!use) {
		pr_warn("toi_bio_restore_original_signature: No signature " "page loaded.\n");
		return 0;
	}

	toi_message(TOI_IO, TOI_VERBOSE, 0, "Recording that no image exists.");
	have_image = 0;
	toi_sig_data->have_image = 0;
	return toi_bio_ops.bdev_page_io(WRITE, resume_block_device,
					resume_firstblock, virt_to_page(use));
}
/*
 * Image_exists
 *
 * Returns -1 if don't know, otherwise 0 (no) or 1 (yes).
 */
/*
 * toi_bio_image_exists - determine whether a hibernation image exists.
 *
 * @quiet: suppress informational printks when non-zero.
 *
 * Returns -1 if we cannot tell (no resume_dev_t, or it cannot be opened),
 * otherwise the toi_check_for_signature() result: 0 (no image), 1
 * (TuxOnIce image), 2 (swsusp/uswsusp image), 3 (old implementation's
 * signature), or a negative error.  Also refreshes TOI_RESUMED_BEFORE
 * from the signature data.
 */
int toi_bio_image_exists(int quiet)
{
	int result;
	char *msg = NULL;

	toi_message(TOI_IO, TOI_VERBOSE, 0, "toi_bio_image_exists.");

	if (!resume_dev_t) {
		if (!quiet)
			printk(KERN_INFO "Not even trying to read header "
			       "because resume_dev_t is not set.\n");
		return -1;
	}

	if (open_resume_dev_t(0, quiet))
		return -1;

	result = toi_check_for_signature();

	clear_toi_state(TOI_RESUMED_BEFORE);
	if (toi_sig_data->resumed_before)
		set_toi_state(TOI_RESUMED_BEFORE);

	if (quiet || result == -ENOMEM)
		return result;

	if (result == -1)
		msg = "TuxOnIce: Unable to find a signature."
		      " Could you have moved a swap file?\n";
	else if (!result)
		msg = "TuxOnIce: No image found.\n";
	else if (result == 1)
		msg = "TuxOnIce: Image found.\n";
	else if (result == 2)
		msg = "TuxOnIce: uswsusp or swsusp image found.\n";
	else if (result == 3)
		msg = "TuxOnIce: Old implementation's signature found.\n";

	/*
	 * Other results (e.g. a negative error propagated from the
	 * signature read) leave msg NULL; don't pass NULL to printk.
	 */
	if (msg)
		printk(KERN_INFO "%s", msg);

	return result;
}
/*
 * check_for_signature - See whether we have an image.
 *
 * Returns 0 if no image, 1 if there is one, -1 if indeterminate.
 */
/*
 * toi_check_for_signature - classify the contents of the signature page.
 *
 * Loads the current signature page if necessary, then checks it against
 * the binary TuxOnIce header, the old file-allocator markers, normal
 * swap signatures, swsusp/uswsusp signatures and the old TuxOnIce
 * signature, in that order.
 *
 * Returns 0 (no image), 1 (image present), 2 (swsusp/uswsusp image),
 * 3 (old TuxOnIce signature), -1 (indeterminate), or a negative error
 * from get_signature_page().
 */
int toi_check_for_signature(void)
{
	union p_diskpage swap_header_page;
	int type;
	static const char * const normal_sigs[] = { "SWAP-SPACE", "SWAPSPACE2" };
	static const char * const swsusp_sigs[] = { "S1SUSP", "S2SUSP", "S1SUSPEND" };
	char *swap_header;

	if (!toi_cur_sig_page) {
		int result = get_signature_page();

		if (result)
			return result;
	}

	/*
	 * Start by looking for the binary header.
	 */
	if (!memcmp(tuxonice_signature, toi_cur_sig_page, sizeof(tuxonice_signature))) {
		have_image = toi_sig_data->have_image;
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Have binary signature. Have image is %d.",
			    have_image);
		if (have_image)
			toi_message(TOI_IO, TOI_VERBOSE, 0, "header dev_t is %x. First block is %lu.",
				    (unsigned int) toi_sig_data->header_dev_t, toi_sig_data->first_header_block);
		return toi_sig_data->have_image;
	}

	/*
	 * Failing that, try old file allocator headers.
	 */

	if (!memcmp(HaveImage, toi_cur_sig_page, strlen(HaveImage))) {
		have_image = 1;
		return 1;
	}

	have_image = 0;

	if (!memcmp(NoImage, toi_cur_sig_page, strlen(NoImage)))
		return 0;

	/*
	 * Nope? How about swap?
	 */
	swap_header_page = (union p_diskpage)toi_cur_sig_page;
	swap_header = swap_header_page.pointer->swh.magic.magic;

	/* Normal swapspace? (Bounds derived from the table, not hard-coded.) */
	for (type = 0; type < (int) ARRAY_SIZE(normal_sigs); type++)
		if (!memcmp(normal_sigs[type], swap_header, strlen(normal_sigs[type])))
			return 0;

	/* Swsusp or uswsusp? */
	for (type = 0; type < (int) ARRAY_SIZE(swsusp_sigs); type++)
		if (!memcmp(swsusp_sigs[type], swap_header, strlen(swsusp_sigs[type])))
			return 2;

	/* Old TuxOnIce version? */
	if (!memcmp(tuxonice_signature, swap_header, sizeof(tuxonice_signature) - 1)) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Found old TuxOnIce " "signature.");
		have_old_image = 1;
		return 3;
	}

	return -1;
}
/**
 * get_pageset1_load_addresses - generate pbes for conflicting pages
 *
 * We check here that pagedir & pages it points to won't collide
 * with pages where we're going to restore from the loaded pages
 * later.
 *
 * Returns:
 *	Zero on success, -ENOMEM if a non-conflicting page for the pbe
 *	lists couldn't be allocated (shouldn't happen).
 **/
int toi_get_pageset1_load_addresses(void)
{
	int pfn, highallocd = 0, lowallocd = 0;
	int low_needed = pagedir1.size - get_highmem_size(pagedir1);
	int high_needed = get_highmem_size(pagedir1);
	int low_pages_for_highmem = 0;
	gfp_t flags = GFP_ATOMIC | __GFP_NOWARN | __GFP_HIGHMEM;
	struct page *page, *high_pbe_page = NULL, *last_high_pbe_page = NULL,
		    *low_pbe_page, *last_low_pbe_page = NULL;
	struct pbe **last_high_pbe_ptr = &restore_highmem_pblist,
		   *this_high_pbe = NULL;
	int orig_low_pfn, orig_high_pfn;
	int high_pbes_done = 0, low_pbes_done = 0;
	int low_direct = 0, high_direct = 0, result = 0, i;
	int high_page = 1, high_offset = 0, low_page = 1, low_offset = 0;

	/*
	 * Separate iterators are used so the copy map and the pageset1 map
	 * can be walked concurrently in the pbe-generation loop below
	 * (index 1 for highmem originals, index 2 for lowmem originals).
	 */
	memory_bm_set_iterators(pageset1_map, 3);
	memory_bm_position_reset(pageset1_map);

	memory_bm_set_iterators(pageset1_copy_map, 2);
	memory_bm_position_reset(pageset1_copy_map);

	last_low_pbe_ptr = &restore_pblist;

	/* First, allocate pages for the start of our pbe lists. */
	if (high_needed) {
		high_pbe_page = ___toi_get_nonconflicting_page(1);
		if (!high_pbe_page) {
			result = -ENOMEM;
			goto out;
		}
		/* Highmem pbe page must be kmapped to be written. */
		this_high_pbe = (struct pbe *) kmap(high_pbe_page);
		memset(this_high_pbe, 0, PAGE_SIZE);
	}

	low_pbe_page = ___toi_get_nonconflicting_page(0);
	if (!low_pbe_page) {
		result = -ENOMEM;
		goto out;
	}
	this_low_pbe = (struct pbe *) page_address(low_pbe_page);

	/*
	 * Next, allocate the number of pages we need.
	 */

	i = low_needed + high_needed;

	do {
		int is_high;

		/*
		 * Once the remaining count drops to what lowmem needs,
		 * stop allocating from highmem.
		 */
		if (i == low_needed)
			flags &= ~__GFP_HIGHMEM;

		page = toi_alloc_page(30, flags);
		BUG_ON(!page);

		SetPagePageset1Copy(page);
		is_high = PageHighMem(page);

		/*
		 * A page that is itself part of pageset1 can receive its
		 * own data directly — no pbe needed for it.
		 */
		if (PagePageset1(page)) {
			if (is_high)
				high_direct++;
			else
				low_direct++;
		} else {
			if (is_high)
				highallocd++;
			else
				lowallocd++;
		}
	} while (--i);

	high_needed -= high_direct;
	low_needed -= low_direct;

	/*
	 * Do we need to use some lowmem pages for the copies of highmem
	 * pages?
	 */
	if (high_needed > highallocd) {
		low_pages_for_highmem = high_needed - highallocd;
		high_needed -= low_pages_for_highmem;
		low_needed += low_pages_for_highmem;
	}

	/*
	 * Now generate our pbes (which will be used for the atomic restore),
	 * and free unneeded pages.
	 */
	memory_bm_position_reset(pageset1_copy_map);
	for (pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1)) {
		int is_high;
		page = pfn_to_page(pfn);
		is_high = PageHighMem(page);

		if (PagePageset1(page))
			continue;

		/* Nope. We're going to use this page. Add a pbe. */
		if (is_high || low_pages_for_highmem) {
			struct page *orig_page;
			high_pbes_done++;
			if (!is_high)
				low_pages_for_highmem--;
			/* Find the next highmem original not already a copy. */
			do {
				orig_high_pfn = memory_bm_next_pfn_index(pageset1_map, 1);
				BUG_ON(orig_high_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_high_pfn);
			} while (!PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			/*
			 * NOTE(review): for highmem pbes, orig_address/address
			 * hold struct page pointers rather than virtual
			 * addresses — presumably resolved at restore time;
			 * confirm against the atomic-restore code.
			 */
			this_high_pbe->orig_address = orig_page;
			this_high_pbe->address = page;
			this_high_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0, "High pbe %d/%d: %p(%d)=>%p",
					high_page, high_offset, page, orig_high_pfn, orig_page);
			/* Crossing into a new pbe page: link it and unmap the old one. */
			if (last_high_pbe_page != high_pbe_page) {
				*last_high_pbe_ptr =
					(struct pbe *) high_pbe_page;
				if (last_high_pbe_page) {
					kunmap(last_high_pbe_page);
					high_page++;
					high_offset = 0;
				} else
					high_offset++;
				last_high_pbe_page = high_pbe_page;
			} else {
				*last_high_pbe_ptr = this_high_pbe;
				high_offset++;
			}
			last_high_pbe_ptr = &this_high_pbe->next;
			this_high_pbe = get_next_pbe(&high_pbe_page,
					this_high_pbe, 1);
			if (IS_ERR(this_high_pbe)) {
				printk(KERN_INFO
						"This high pbe is an error.\n");
				/*
				 * NOTE(review): returns directly, bypassing the
				 * iterator reset at out: (and any pending kunmap)
				 * — verify whether this leak is acceptable on
				 * this failure path.
				 */
				return -ENOMEM;
			}
		} else {
			struct page *orig_page;
			low_pbes_done++;
			/* Find the next lowmem original not already a copy. */
			do {
				orig_low_pfn = memory_bm_next_pfn_index(pageset1_map, 2);
				BUG_ON(orig_low_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_low_pfn);
			} while (PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_low_pbe->orig_address = page_address(orig_page);
			this_low_pbe->address = page_address(page);
			this_low_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0, "Low pbe %d/%d: %p(%d)=>%p",
					low_page, low_offset, this_low_pbe->orig_address,
					orig_low_pfn, this_low_pbe->address);
			*last_low_pbe_ptr = this_low_pbe;
			last_low_pbe_ptr = &this_low_pbe->next;
			this_low_pbe = get_next_pbe(&low_pbe_page,
					this_low_pbe, 0);
			if (low_pbe_page != last_low_pbe_page) {
				if (last_low_pbe_page) {
					low_page++;
					low_offset = 0;
				}
				last_low_pbe_page = low_pbe_page;
			} else
				low_offset++;
			if (IS_ERR(this_low_pbe)) {
				printk(KERN_INFO "this_low_pbe is an error.\n");
				/* NOTE(review): same direct return as above — skips out: cleanup. */
				return -ENOMEM;
			}
		}
	}

	if (high_pbe_page)
		kunmap(high_pbe_page);

	/* Free the final high pbe page if it ended up unused. */
	if (last_high_pbe_page != high_pbe_page) {
		if (last_high_pbe_page)
			kunmap(last_high_pbe_page);
		toi__free_page(29, high_pbe_page);
	}

	free_conflicting_pages();

out:
	/* Restore the default single-iterator state for both bitmaps. */
	memory_bm_set_iterators(pageset1_map, 1);
	memory_bm_set_iterators(pageset1_copy_map, 1);
	return result;
}