Example No. 1
/**
 * __toi_post_context_save - steps after saving the cpu context
 *
 * Steps taken after saving the CPU state to make the actual
 * atomic copy.
 *
 * Called from swsusp_save in snapshot.c via toi_post_context_save.
 **/
int __toi_post_context_save(void)
{
	unsigned long old_ps1_size = pagedir1.size;

	check_checksums();

	free_checksum_pages();

	toi_recalculate_image_contents(1);

	extra_pd1_pages_used = pagedir1.size > old_ps1_size ?
		pagedir1.size - old_ps1_size : 0;

	if (extra_pd1_pages_used > extra_pd1_pages_allowance) {
		printk(KERN_INFO "Pageset1 has grown by %lu pages. "
			"extra_pages_allowance is currently only %lu.\n",
			pagedir1.size - old_ps1_size,
			extra_pd1_pages_allowance);

		/*
		 * Highlevel code will see this, clear the state and
		 * retry if we haven't already done so twice.
		 */
		if (any_to_free(1)) {
			set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
			return 1;
		}
		if (try_allocate_extra_memory()) {
			printk(KERN_INFO "Failed to allocate the extra memory"
					" needed. Restarting the process.");
			set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
			return 1;
		}
		printk(KERN_INFO "However it looks like there's enough"
			" free ram and storage to handle this, so "
			" continuing anyway.");
		/* 
		 * What if try_allocate_extra_memory above calls
		 * toi_allocate_extra_pagedir_memory and it allocs a new
		 * slab page via toi_kzalloc which should be in ps1? So...
		 */
		toi_recalculate_image_contents(1);
	}

	if (!test_action_state(TOI_TEST_FILTER_SPEED) &&
	    !test_action_state(TOI_TEST_BIO))
		toi_copy_pageset1();

	return 0;
}
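The comment inside __toi_post_context_save() notes that "highlevel code will see this, clear the state and retry if we haven't already done so twice". Below is a minimal, self-contained sketch of that retry contract. It is hypothetical user-space C, not the real TuxOnIce caller: fake_post_context_save, MAX_COPY_RETRIES and the value of TOI_EXTRA_PAGES_ALLOW_TOO_SMALL are assumptions made purely for illustration.

/*
 * Hypothetical caller sketch: not the real TuxOnIce high-level code.
 * It only illustrates the retry contract described in the comment above:
 * a non-zero return plus TOI_EXTRA_PAGES_ALLOW_TOO_SMALL means "grow the
 * allowance, clear the abort state and try again", a bounded number of
 * times.  All names and values here are assumptions for illustration.
 */
#include <stdio.h>

#define TOI_EXTRA_PAGES_ALLOW_TOO_SMALL	1	/* assumed value */
#define MAX_COPY_RETRIES		2	/* "if we haven't already done so twice" */

static unsigned long extra_pd1_pages_allowance = 500;
static int abort_result;

/* Stand-in for __toi_post_context_save(): fails until the allowance is large enough. */
static int fake_post_context_save(void)
{
	if (extra_pd1_pages_allowance < 1000) {
		abort_result = TOI_EXTRA_PAGES_ALLOW_TOO_SMALL;
		return 1;
	}
	return 0;
}

int main(void)
{
	int tries;

	for (tries = 0; tries <= MAX_COPY_RETRIES; tries++) {
		abort_result = 0;
		if (!fake_post_context_save()) {
			printf("atomic copy prepared after %d retries\n", tries);
			return 0;
		}
		if (abort_result != TOI_EXTRA_PAGES_ALLOW_TOO_SMALL)
			break;				/* unrelated failure: give up */
		extra_pd1_pages_allowance *= 2;		/* grow the allowance and retry */
	}
	printf("giving up after %d attempts\n", tries);
	return 1;
}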
void check_checksums(void)
{
	unsigned long pfn;
	int index = 0, cpu = smp_processor_id();
	char current_checksum[CHECKSUM_SIZE];
	struct cpu_context *ctx = &per_cpu(contexts, cpu);

	if (!toi_checksum_ops.enabled) {
		toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksumming disabled.");
		return;
	}

	next_page = (unsigned long) page_list;

	toi_num_resaved = 0;
	this_checksum = 0;

	toi_message(TOI_IO, TOI_VERBOSE, 0, "Verifying checksums.");
	memory_bm_position_reset(pageset2_map);
	for (pfn = memory_bm_next_pfn(pageset2_map); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn(pageset2_map)) {
		int ret;
		char *pa;
		struct page *page = pfn_to_page(pfn);

		if (index % CHECKSUMS_PER_PAGE) {
			this_checksum += CHECKSUM_SIZE;
		} else {
			this_checksum = next_page + sizeof(void *);
			next_page = *((unsigned long *) next_page);
		}

		/* Done when IRQs disabled so must be atomic */
		pa = kmap_atomic(page);
		memcpy(ctx->buf, pa, PAGE_SIZE);
		kunmap_atomic(pa);
		ret = crypto_hash_digest(&ctx->desc, ctx->sg, PAGE_SIZE,
							current_checksum);

		if (ret) {
			printk(KERN_INFO "Digest failed. Returned %d.\n", ret);
			return;
		}

		if (memcmp(current_checksum, (char *) this_checksum,
							CHECKSUM_SIZE)) {
			toi_message(TOI_IO, TOI_VERBOSE, 0, "Resaving %lu.",
					pfn);
			SetPageResave(pfn_to_page(pfn));
			toi_num_resaved++;
			if (test_action_state(TOI_ABORT_ON_RESAVE_NEEDED))
				set_abort_result(TOI_RESAVE_NEEDED);
		}

		index++;
	}
	toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksum verification complete.");
}
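The `index % CHECKSUMS_PER_PAGE` stepping above implies that the checksum store is a simple chain of pages: the first word of each page holds the address of the next page, and the rest of the page is packed with CHECKSUM_SIZE-byte digests. The self-contained user-space sketch below reproduces that layout and walk; the SKETCH_* constants and the alloc_chain helper are assumed stand-ins for the real PAGE_SIZE, CHECKSUM_SIZE, CHECKSUMS_PER_PAGE and the checksum-page allocator, not TuxOnIce definitions.

/*
 * Self-contained user-space sketch of the checksum-page layout walked by
 * check_checksums() above.  All SKETCH_* names and values are assumptions
 * for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE	4096
#define SKETCH_CHECKSUM_SIZE	16
#define SKETCH_PER_PAGE \
	((SKETCH_PAGE_SIZE - sizeof(void *)) / SKETCH_CHECKSUM_SIZE)

static unsigned long next_page;		/* address of the checksum page being walked */
static unsigned long this_checksum;	/* address of the current checksum slot */

/* Build a chain of checksum pages linked through their first word. */
static void *alloc_chain(int count)
{
	void *head = NULL, **prev = &head;

	while (count--) {
		void *page = calloc(1, SKETCH_PAGE_SIZE);

		if (!page)
			break;
		*prev = page;
		prev = (void **) page;	/* word 0 points at the next page */
	}
	return head;
}

int main(void)
{
	unsigned long index;
	void *page_list = alloc_chain(2);

	if (!page_list)
		return 1;

	next_page = (unsigned long) page_list;

	/* Same slot-stepping logic as the pfn loop in check_checksums(). */
	for (index = 0; index < 2 * SKETCH_PER_PAGE; index++) {
		if (index % SKETCH_PER_PAGE) {
			this_checksum += SKETCH_CHECKSUM_SIZE;
		} else {
			this_checksum = next_page + sizeof(void *);
			next_page = *((unsigned long *) next_page);
		}
		/* memcmp(computed_digest, (char *) this_checksum, ...) would go here */
	}
	printf("walked %lu checksum slots\n", index);

	while (page_list) {			/* free the chain */
		void *next = *(void **) page_list;

		free(page_list);
		page_list = next;
	}
	return 0;
}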
Example No. 3
/**
 * toi_go_atomic - do the actual atomic copy/restore
 * @state:	   The state to use for dpm_suspend_start & power_down calls.
 * @suspend_time:  Whether we're suspending or resuming.
 **/
int toi_go_atomic(pm_message_t state, int suspend_time)
{
	if (suspend_time) {
		if (platform_begin(1)) {
			set_abort_result(TOI_PLATFORM_PREP_FAILED);
			toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
			return 1;
		}

		if (dpm_prepare(PMSG_FREEZE)) {
			set_abort_result(TOI_DPM_PREPARE_FAILED);
			dpm_complete(PMSG_RECOVER);
			toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
			return 1;
		}
	}

	suspend_console();
	ftrace_stop();
	pm_restrict_gfp_mask();

	if (suspend_time) {
		if (dpm_suspend(state)) {
			set_abort_result(TOI_DPM_SUSPEND_FAILED);
			toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
			return 1;
		}
	} else {
		if (dpm_suspend_start(state)) {
			set_abort_result(TOI_DPM_SUSPEND_FAILED);
			toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
			return 1;
		}
	}

	/* At this point, dpm_suspend_start() has been called, but *not*
	 * dpm_suspend_noirq(). We *must* dpm_suspend_noirq() now.
	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
	 * become desynchronized with the actual state of the hardware
	 * at resume time, and evil weirdness ensues.
	 */

	if (dpm_suspend_end(state)) {
		set_abort_result(TOI_DEVICE_REFUSED);
		toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 1);
		return 1;
	}

	if (suspend_time) {
		if (platform_pre_snapshot(1))
			set_abort_result(TOI_PRE_SNAPSHOT_FAILED);
	} else {
		if (platform_pre_restore(1))
			set_abort_result(TOI_PRE_RESTORE_FAILED);
	}

	if (test_result_state(TOI_ABORTED)) {
		toi_end_atomic(ATOMIC_STEP_PLATFORM_FINISH, suspend_time, 1);
		return 1;
	}

	if (test_action_state(TOI_LATE_CPU_HOTPLUG)) {
		if (disable_nonboot_cpus()) {
			set_abort_result(TOI_CPU_HOTPLUG_FAILED);
			toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG,
					suspend_time, 1);
			return 1;
		}
	}

	local_irq_disable();

	if (syscore_suspend()) {
		set_abort_result(TOI_SYSCORE_REFUSED);
		toi_end_atomic(ATOMIC_STEP_IRQS, suspend_time, 1);
		return 1;
	}

	if (suspend_time && pm_wakeup_pending()) {
		set_abort_result(TOI_WAKEUP_EVENT);
		toi_end_atomic(ATOMIC_STEP_SYSCORE_RESUME, suspend_time, 1);
		return 1;
	}
	return 0;
}
/**
 * toi_go_atomic - do the actual atomic copy/restore
 * @state:	   The state to use for dpm_suspend_start & power_down calls.
 * @suspend_time:  Whether we're suspending or resuming.
 **/
int toi_go_atomic(pm_message_t state, int suspend_time)
{
	if (suspend_time) {
		if (platform_begin(1)) {
			set_abort_result(TOI_PLATFORM_PREP_FAILED);
			toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n", __LINE__,
				suspend_time, state.event);
			return 1;
		}

		if (dpm_prepare(PMSG_FREEZE)) {
			set_abort_result(TOI_DPM_PREPARE_FAILED);
			dpm_complete(PMSG_RECOVER);
			toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n", __LINE__,
				suspend_time, state.event);
			return 1;
		}
	}

	suspend_console();
	ftrace_stop();
	pm_restrict_gfp_mask();

	if (suspend_time) {
#if 0				/* FIXME: jonathan.jmchen: trick code here to let dpm_suspend succeed; NEED to find out the root cause!! */
		if (events_check_enabled) {
			hib_log("play trick here set events_check_enabled(%d) = false!!\n",
				events_check_enabled);
			events_check_enabled = false;
		}
#endif
		if (dpm_suspend(state)) {
			set_abort_result(TOI_DPM_SUSPEND_FAILED);
			toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n",
				__LINE__, suspend_time, state.event, toi_result);
			return 1;
		}
	} else {
		if (dpm_suspend_start(state)) {
			set_abort_result(TOI_DPM_SUSPEND_FAILED);
			toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n",
				__LINE__, suspend_time, state.event, toi_result);
			return 1;
		}
	}

	/* At this point, dpm_suspend_start() has been called, but *not*
	 * dpm_suspend_noirq(). We *must* dpm_suspend_noirq() now.
	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
	 * become desynchronized with the actual state of the hardware
	 * at resume time, and evil weirdness ensues.
	 */

	if (dpm_suspend_end(state)) {
		set_abort_result(TOI_DEVICE_REFUSED);
		toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n", __LINE__,
			suspend_time, state.event, toi_result);
		return 1;
	}

	if (suspend_time) {
		if (platform_pre_snapshot(1))
			set_abort_result(TOI_PRE_SNAPSHOT_FAILED);
	} else {
		if (platform_pre_restore(1))
			set_abort_result(TOI_PRE_RESTORE_FAILED);
	}

	if (test_result_state(TOI_ABORTED)) {
		toi_end_atomic(ATOMIC_STEP_PLATFORM_FINISH, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n", __LINE__,
			suspend_time, state.event, toi_result);
		return 1;
	}

	if (test_action_state(TOI_LATE_CPU_HOTPLUG)) {
		if (disable_nonboot_cpus()) {
			set_abort_result(TOI_CPU_HOTPLUG_FAILED);
			toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG, suspend_time, 1);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n",
				__LINE__, suspend_time, state.event, toi_result);
			return 1;
		}
	}

	local_irq_disable();

	if (syscore_suspend()) {
		set_abort_result(TOI_SYSCORE_REFUSED);
		toi_end_atomic(ATOMIC_STEP_IRQS, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n", __LINE__,
			suspend_time, state.event, toi_result);
		return 1;
	}

	if (suspend_time && pm_wakeup_pending()) {
		set_abort_result(TOI_WAKEUP_EVENT);
		toi_end_atomic(ATOMIC_STEP_SYSCORE_RESUME, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(0x%#lx)\n", __LINE__,
			suspend_time, state.event, toi_result);
		return 1;
	}
	hib_log("SUCCEEDED @line:%d suspend(%d) pm_state(%d)\n", __LINE__, suspend_time,
		state.event);
	return 0;
}
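Every failure path above hands toi_end_atomic() the first ATOMIC_STEP_* that still needs undoing, so the unwind can skip steps that never ran. A common way to structure such a helper is a switch that falls through the remaining steps in order. The sketch below illustrates that pattern only; the mapping of steps to resume calls is an assumption based on what toi_go_atomic() sets up, not the verified body of toi_end_atomic().

/*
 * Illustration of the "unwind from step N" pattern used by the
 * toi_end_atomic() calls above.  The enum ordering and the calls named in
 * the printf strings are assumptions for illustration, not the real
 * toi_end_atomic() implementation.
 */
#include <stdio.h>

enum sketch_atomic_step {
	SKETCH_STEP_SYSCORE_RESUME,
	SKETCH_STEP_IRQS,
	SKETCH_STEP_CPU_HOTPLUG,
	SKETCH_STEP_PLATFORM_FINISH,
	SKETCH_STEP_DEVICE_RESUME,
	SKETCH_STEP_PLATFORM_END,
};

/* Undo everything from first_step onwards; earlier steps never ran. */
static void sketch_end_atomic(enum sketch_atomic_step first_step)
{
	switch (first_step) {
	case SKETCH_STEP_SYSCORE_RESUME:
		printf("syscore_resume()\n");
		/* fall through */
	case SKETCH_STEP_IRQS:
		printf("local_irq_enable()\n");
		/* fall through */
	case SKETCH_STEP_CPU_HOTPLUG:
		printf("enable_nonboot_cpus() (if late CPU hotplug was used)\n");
		/* fall through */
	case SKETCH_STEP_PLATFORM_FINISH:
		printf("platform finish / restore cleanup\n");
		/* fall through */
	case SKETCH_STEP_DEVICE_RESUME:
		printf("dpm_resume_end(), resume_console(), pm_restore_gfp_mask()\n");
		/* fall through */
	case SKETCH_STEP_PLATFORM_END:
		printf("platform_end()\n");
		break;
	}
}

int main(void)
{
	/* e.g. syscore_suspend() failed above: IRQs are off, devices are suspended. */
	sketch_end_atomic(SKETCH_STEP_IRQS);
	return 0;
}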
/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	source_index = memory_bm_next_pfn(pageset1_map);
	dest_index = memory_bm_next_pfn(pageset1_copy_map);

	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1, was_present1, was_present2;

#ifdef CONFIG_TOI_ENHANCE
		if (!pfn_valid(source_index) || !pfn_valid(dest_index)) {
			pr_emerg("[%s] (%d) dest_index:%lu, source_index:%lu\n", __func__, i,
				 dest_index, source_index);
			set_abort_result(TOI_ARCH_PREPARE_FAILED);
			return;
		}
#endif

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		origvirt = PageHighMem(origpage) ? kmap_atomic(origpage) : page_address(origpage);

		copyvirt = PageHighMem(copypage) ? kmap_atomic(copypage) : page_address(copypage);

		was_present1 = kernel_page_present(origpage);
		if (!was_present1)
			kernel_map_pages(origpage, 1, 1);

		was_present2 = kernel_page_present(copypage);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present1)
			kernel_map_pages(origpage, 1, 0);

		if (!was_present2)
			kernel_map_pages(copypage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt);

		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt);

		source_index = memory_bm_next_pfn(pageset1_map);
		dest_index = memory_bm_next_pfn(pageset1_copy_map);
	}
}