/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
static void rcu_idle_enter_common(long long newval)
{
	__CPROVER_atomic_begin(); if (!covered[0]) {covered[0] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
	if (newval) {
		__CPROVER_atomic_begin(); if (!covered[1]) {covered[1] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
rcu_dynticks_nesting, newval));
		__CPROVER_atomic_begin(); if (!covered[2]) {covered[2] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
		rcu_dynticks_nesting = newval;
		__CPROVER_atomic_begin(); if (!covered[3]) {covered[3] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
		return;
	__CPROVER_atomic_begin(); if (!covered[4]) {covered[4] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
	}
	__CPROVER_atomic_begin(); if (!covered[5]) {covered[5] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    __CPROVER_atomic_begin(); if (!covered[6]) {covered[6] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
				    rcu_dynticks_nesting, newval));
	__CPROVER_atomic_begin(); if (!covered[7]) {covered[7] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		__CPROVER_atomic_begin(); if (!covered[8]) {covered[8] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		__CPROVER_atomic_begin(); if (!covered[9]) {covered[9] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    __CPROVER_atomic_begin(); if (!covered[10]) {covered[10] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
					    rcu_dynticks_nesting, newval));
		__CPROVER_atomic_begin(); if (!covered[11]) {covered[11] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
		ftrace_dump(DUMP_ALL);
		__CPROVER_atomic_begin(); if (!covered[12]) {covered[12] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  __CPROVER_atomic_begin(); if (!covered[13]) {covered[13] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	__CPROVER_atomic_begin(); if (!covered[14]) {covered[14] = 1; total_covered += 1;} __CPROVER_atomic_end(); 
	}
Example #2
0
/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation).  Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error;

	/* Refuse sleep states the current kernel/platform cannot enter. */
	if (!sleep_state_supported(state))
		return -EPERM;

	pm_prepare_console();

	/* A notifier veto must still unwind the console and POST chain. */
	error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
	if (error)
		goto Finish;

	trace_suspend_resume(TPS("freeze_processes"), 0, true);
	error = suspend_freeze_processes();
	trace_suspend_resume(TPS("freeze_processes"), 0, false);
	if (!error)
		return 0; /* success: console/notifier state stays set up */

	/* Freezing failed: record it in the suspend statistics, then unwind. */
	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
	return error;
}
/*
 * Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c.
 * Lowers rcu_dynticks_nesting to @newval; a zero @newval means we are really
 * entering idle, so a quiescent state is reported first.  The "__coveredN" /
 * "total_covered" updates are coverage instrumentation, not RCU logic.
 */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		if (!__covered0) {__covered0 = 1; total_covered += 1;}
		/* Still nested: just trace and record the new depth. */
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    rcu_dynticks_nesting, newval));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		if (!__covered1) {__covered1 = 1; total_covered += 1;}
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		/* Only the idle task may enter idle: dump the trace and warn. */
		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(); /* implies rcu_bh_inc() */
	barrier(); /* compiler barrier: keep the qs above ordered before the store below */
	rcu_dynticks_nesting = newval;
}
Example #4
0
//=============================================================================
static void HW3S_FillSourceParameters
                              (const mobj_t     *origin,
                               source3D_data_t  *data,
                               channel_type_t   c_type)
{
	fixed_t x = 0, y = 0, z = 0;

	data->max_distance = MAX_DISTANCE;
	data->min_distance = MIN_DISTANCE;

	if (origin && origin != players[displayplayer].mo)
	{
		data->head_relative = false;

		data->pos.momx = TPS(FIXED_TO_FLOAT(origin->momx));
		data->pos.momy = TPS(FIXED_TO_FLOAT(origin->momy));
		data->pos.momz = TPS(FIXED_TO_FLOAT(origin->momz));

		x = origin->x;
		y = origin->y;
		z = origin->z;

		if (c_type == CT_ATTACK)
		{
			const angle_t an = origin->angle >> ANGLETOFINESHIFT;
			x += FixedMul(16*FRACUNIT, FINECOSINE(an));
			y += FixedMul(16*FRACUNIT, FINESINE(an));
			z += origin->height >> 1;
		}

		else if (c_type == CT_SCREAM)
Example #5
0
/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	trace_suspend_resume(TPS("suspend_enter"), state, true);
	if (state == PM_SUSPEND_TO_IDLE) {
#ifdef CONFIG_PM_DEBUG
		/* Reject pm_test levels that are not supported for s2idle. */
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
			pr_warn("Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	/* Only one system sleep transition may be in flight at a time. */
	if (!mutex_trylock(&system_transition_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_TO_IDLE)
		s2idle_begin();

#ifndef CONFIG_SUSPEND_SKIP_SYNC
	/* Flush filesystems before anything gets frozen or powered down. */
	trace_suspend_resume(TPS("sync_filesystems"), 0, true);
	pr_info("Syncing filesystems ... ");
	ksys_sync();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("sync_filesystems"), 0, false);
#endif

	pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
	pm_suspend_clear_flags();
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	trace_suspend_resume(TPS("suspend_enter"), state, false);
	pm_pr_dbg("Suspending system (%s)\n", mem_sleep_labels[state]);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	/* Common wakeup path: undo what suspend_prepare() set up. */
	events_check_enabled = false;
	pm_pr_dbg("Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&system_transition_mutex);
	return error;
}
Example #6
0
/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	trace_suspend_resume(TPS("suspend_enter"), state, true);
	if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
		/* Reject pm_test levels that are not supported for freeze. */
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
			/*
			 * Note the trailing space before the literal split:
			 * without it the message reads "state,please".
			 */
			pr_warning("PM: Unsupported test mode for freeze state, "
				   "please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	/* Only one system sleep transition may be in flight at a time. */
	if (!mutex_trylock(&pm_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_FREEZE)
		freeze_begin();

	/* Flush filesystems before anything gets frozen or powered down. */
	trace_suspend_resume(TPS("sync_filesystems"), 0, true);
	printk(KERN_INFO "PM: Syncing filesystems ... ");
	sys_sync();
	printk("done.\n");
	trace_suspend_resume(TPS("sync_filesystems"), 0, false);

	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	trace_suspend_resume(TPS("suspend_enter"), state, false);
	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	/* Common wakeup path: undo what suspend_prepare() set up. */
	pr_debug("PM: Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&pm_mutex);
	return error;
}
Example #7
0
/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (!sleep_state_supported(state))
		return -ENOSYS;

	pm_suspend_target_state = state;

	error = platform_suspend_begin(state);
	if (error)
		goto Close;

	suspend_console();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		pr_err("Some devices failed to suspend, or early wake event detected\n");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

	/* Re-enter the sleep state for as long as the platform requests it. */
	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && platform_suspend_again(state));

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	trace_suspend_resume(TPS("resume_console"), state, true);
	resume_console();
	trace_suspend_resume(TPS("resume_console"), state, false);

 Close:
	platform_resume_end(state);
	pm_suspend_target_state = PM_SUSPEND_ON;
	return error;

 Recover_platform:
	/* Device-level failure: let the platform recover, then resume devices. */
	platform_recover(state);
	goto Resume_devices;
}
Example #8
0
/**
 * syscore_resume - Execute all the registered system core resume callbacks.
 *
 * This function is executed with one CPU on-line and disabled interrupts.
 */
void syscore_resume(void)
{
	struct syscore_ops *ops;

	trace_suspend_resume(TPS("syscore_resume"), 0, true);
	WARN_ONCE(!irqs_disabled(),
		"Interrupts enabled before system core resume.\n");

	list_for_each_entry(ops, &syscore_ops_list, node)
		if (ops->resume) {
			if (initcall_debug)
				pr_info("PM: Calling %pF\n", ops->resume);
			ops->resume();
			WARN_ONCE(!irqs_disabled(),
				"Interrupts enabled after %pF\n", ops->resume);
		}
	trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
			  oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}
Example #10
0
/*
 * disable_nonboot_cpus - take every online CPU except the first one offline.
 * Returns 0 on success or the _cpu_down() error that stopped the loop; CPUs
 * actually taken down are recorded in frozen_cpus for later re-enabling.
 */
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
Example #11
0
/*
 * ft_tinit - put the terminal into raw-ish input mode for the shell.
 * Resolves the terminal name from $TERM (falling back to DEFAULT_TERM),
 * loads its termcap entry, saves the current termios settings in term->save,
 * disables echo/canonical input, and emits the "ti"/"vi"/"RA" capabilities.
 * Returns false if the termcap entry or termios calls fail.
 */
t_bool			ft_tinit(t_term *term)
{
	struct termios	raw;

	term->name = ft_getenv("TERM");
	if (term->name == NULL || tgetent(NULL, term->name) <= 0)
		term->name = DEFAULT_TERM;
	if (tgetent(NULL, term->name) <= 0)
		return (false);
	if (tcgetattr(0, &raw) < 0)
		return (false);
	ft_memcpy(&(term->save), &raw, sizeof(struct termios));
	raw.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHONL | ICANON);
	raw.c_cc[VMIN] = 1;
	raw.c_cc[VTIME] = 0;
	if (tcsetattr(0, TCSADRAIN, &raw) < 0)
		return (false);
	if (tgetflag("am"))
		term->save_am = true;
	else
		term->save_am = false;
	TPS("ti");
	TPS("vi");
	TPS("RA");
	ft_tupdate(term);
	return (true);
}
Example #12
0
/**
 * syscore_suspend - Execute all the registered system core suspend callbacks.
 *
 * This function is executed with one CPU on-line and disabled interrupts.
 */
int syscore_suspend(void)
{
	struct syscore_ops *ops;
	int ret = 0;

	trace_suspend_resume(TPS("syscore_suspend"), 0, true);
	pr_debug("Checking wakeup interrupts\n");

	/* Return error code if there are any wakeup interrupts pending. */
	/* NOTE(review): this early return skips the closing "syscore_suspend"
	 * trace event emitted below — confirm the imbalance is intended. */
	if (pm_wakeup_pending())
		return -EBUSY;

	WARN_ONCE(!irqs_disabled(),
		"Interrupts enabled before system core suspend.\n");

	/* Suspend in reverse registration order; resume runs forward. */
	list_for_each_entry_reverse(ops, &syscore_ops_list, node)
		if (ops->suspend) {
			if (initcall_debug)
				pr_info("PM: Calling %pF\n", ops->suspend);
			ret = ops->suspend();
			if (ret)
				goto err_out;
			WARN_ONCE(!irqs_disabled(),
				"Interrupts enabled after %pF\n", ops->suspend);
		}

	trace_suspend_resume(TPS("syscore_suspend"), 0, false);
	return 0;

 err_out:
	pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);

	/* Roll back: resume everything suspended before the failing entry. */
	list_for_each_entry_continue(ops, &syscore_ops_list, node)
		if (ops->resume)
			ops->resume();

	return ret;
}
Example #13
0
/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping.  Otherwise suspend the local tick.
 *
 * Call with interrupts disabled.  Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth != num_online_cpus()) {
		/* Not the last online CPU yet: only quiesce the local tick. */
		tick_suspend_local();
	} else {
		/* Last CPU in: suspend timekeeping for the whole system. */
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		timekeeping_suspend();
	}

	raw_spin_unlock(&tick_freeze_lock);
}
Example #14
0
void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}
Example #15
0
/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping.  Otherwise resume the local tick.
 *
 * Call with interrupts disabled.  Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	/* First CPU out (depth still equals the online count) resumes
	 * timekeeping; every later CPU only resumes its local tick. */
	if (tick_freeze_depth == num_online_cpus()) {
		timekeeping_resume();
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
Example #16
0
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	error = platform_suspend_prepare(state);
	if (error)
		goto Platform_finish;

	error = dpm_suspend_late(PMSG_SUSPEND);
	if (error) {
		pr_err("late suspend of devices failed\n");
		goto Platform_finish;
	}
	error = platform_suspend_prepare_late(state);
	if (error)
		goto Devices_early_resume;

	/* Suspend-to-idle loops in the s2idle path instead of the noirq/
	 * CPU-offline/syscore sequence below. */
	if (state == PM_SUSPEND_TO_IDLE && pm_test_level != TEST_PLATFORM) {
		s2idle_loop();
		goto Platform_early_resume;
	}

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		pr_err("noirq suspend of devices failed\n");
		goto Platform_early_resume;
	}
	error = platform_suspend_prepare_noirq(state);
	if (error)
		goto Platform_wake;

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	system_state = SYSTEM_SUSPEND;

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		/* Enter the sleep state unless a wakeup is already pending
		 * or the core test level asks us to stop here. */
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			trace_suspend_resume(TPS("machine_suspend"),
				state, true);
			error = suspend_ops->enter(state);
			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
		} else if (*wakeup) {
			error = -EBUSY;
		}
		syscore_resume();
	}

	system_state = SYSTEM_RUNNING;

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

	/* Labels below unwind the stages above in reverse order; every
	 * entry point falls through all later stages. */
 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	platform_resume_noirq(state);
	dpm_resume_noirq(PMSG_RESUME);

 Platform_early_resume:
	platform_resume_early(state);

 Devices_early_resume:
	dpm_resume_early(PMSG_RESUME);

 Platform_finish:
	platform_resume_finish(state);
	return error;
}
Example #17
0
/**
 * create_image - Create a hibernation image.
 * @platform_mode: Whether or not to use the platform driver.
 *
 * Execute device drivers' "late" and "noirq" freeze callbacks, create a
 * hibernation image and run the drivers' "noirq" and "early" thaw callbacks.
 *
 * Control reappears in this routine after the subsequent restore.
 */
static int create_image(int platform_mode)
{
	int error;

	error = dpm_suspend_end(PMSG_FREEZE);
	if (error) {
		pr_err("Some devices failed to power down, aborting hibernation\n");
		return error;
	}

	error = platform_pre_snapshot(platform_mode);
	if (error || hibernation_test(TEST_PLATFORM))
		goto Platform_finish;

	error = disable_nonboot_cpus();
	if (error || hibernation_test(TEST_CPUS))
		goto Enable_cpus;

	local_irq_disable();

	error = syscore_suspend();
	if (error) {
		pr_err("Some system devices failed to power down, aborting hibernation\n");
		goto Enable_irqs;
	}

	if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
		goto Power_up;

	/* in_suspend is cleared by the restore path, so after
	 * swsusp_arch_suspend() it distinguishes snapshot vs. restore. */
	in_suspend = 1;
	save_processor_state();
	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
	error = swsusp_arch_suspend();
	/* Restore control flow magically appears here */
	restore_processor_state();
	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
	if (error)
		pr_err("Error %d creating hibernation image\n", error);

	if (!in_suspend) {
		/* We are on the restore side: drop stale wakeup state. */
		events_check_enabled = false;
		clear_free_pages();
	}

	platform_leave(platform_mode);

	/* Labels below unwind the stages above in reverse order. */
 Power_up:
	syscore_resume();

 Enable_irqs:
	local_irq_enable();

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_finish:
	platform_finish(platform_mode);

	/* Pick the resume message based on snapshot/restore and error state. */
	dpm_resume_start(in_suspend ?
		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);

	return error;
}
Example #18
0
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	/* Buffer for the names of wakeup sources that aborted the suspend. */
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
	int error, last_dev;

	error = platform_suspend_prepare(state);
	if (error)
		goto Platform_finish;

	error = dpm_suspend_late(PMSG_SUSPEND);
	if (error) {
		/* Index of the most recent failed device in the ring of
		 * REC_FAILED_NUM recorded failures. */
		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
		last_dev %= REC_FAILED_NUM;
		printk(KERN_ERR "PM: late suspend of devices failed\n");
		log_suspend_abort_reason("%s device failed to power down",
			suspend_stats.failed_devs[last_dev]);
		goto Platform_finish;
	}
	error = platform_suspend_prepare_late(state);
	if (error)
		goto Devices_early_resume;

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
		last_dev %= REC_FAILED_NUM;
		printk(KERN_ERR "PM: noirq suspend of devices failed\n");
		log_suspend_abort_reason("noirq suspend of %s device failed",
			suspend_stats.failed_devs[last_dev]);
		goto Platform_early_resume;
	}
	error = platform_suspend_prepare_noirq(state);
	if (error)
		goto Platform_wake;

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		trace_suspend_resume(TPS("machine_suspend"), state, true);
		freeze_enter();
		trace_suspend_resume(TPS("machine_suspend"), state, false);
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS)) {
		log_suspend_abort_reason("Disabling non-boot cpus failed");
		goto Enable_cpus;
	}

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		/* Enter the sleep state unless a wakeup is already pending
		 * or the core test level asks us to stop here. */
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			trace_suspend_resume(TPS("machine_suspend"),
				state, true);
			error = suspend_ops->enter(state);
			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
			events_check_enabled = false;
		} else if (*wakeup) {
			/* Record which wakeup sources aborted the suspend. */
			pm_get_active_wakeup_sources(suspend_abort,
				MAX_SUSPEND_ABORT_LEN);
			log_suspend_abort_reason(suspend_abort);
			error = -EBUSY;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

	/* Labels below unwind the stages above in reverse order; every
	 * entry point falls through all later stages. */
 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	platform_resume_noirq(state);
	dpm_resume_noirq(PMSG_RESUME);

 Platform_early_resume:
	platform_resume_early(state);

 Devices_early_resume:
	dpm_resume_early(PMSG_RESUME);

 Platform_finish:
	platform_resume_finish(state);
	return error;
}
Example #19
0
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	error = platform_suspend_prepare(state);
	if (error)
		goto Platform_finish;

	error = dpm_suspend_late(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: late suspend of devices failed\n");
		goto Platform_finish;
	}
	error = platform_suspend_prepare_late(state);
	if (error)
		goto Devices_early_resume;

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: noirq suspend of devices failed\n");
		goto Platform_early_resume;
	}
	error = platform_suspend_prepare_noirq(state);
	if (error)
		goto Platform_wake;

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	/*
	 * PM_SUSPEND_FREEZE equals
	 * frozen processes + suspended devices + idle processors.
	 * Thus we should invoke freeze_enter() soon after
	 * all the devices are suspended.
	 */
	if (state == PM_SUSPEND_FREEZE) {
		trace_suspend_resume(TPS("machine_suspend"), state, true);
		freeze_enter();
		trace_suspend_resume(TPS("machine_suspend"), state, false);
		goto Platform_wake;
	}

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		/* Enter the sleep state unless a wakeup is already pending
		 * or the core test level asks us to stop here. */
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			trace_suspend_resume(TPS("machine_suspend"),
				state, true);
			error = suspend_ops->enter(state);
			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
			events_check_enabled = false;
		}
		syscore_resume();
	}

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

	/* Labels below unwind the stages above in reverse order; every
	 * entry point falls through all later stages. */
 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	platform_resume_noirq(state);
	dpm_resume_noirq(PMSG_RESUME);

 Platform_early_resume:
	platform_resume_early(state);

 Devices_early_resume:
	dpm_resume_early(PMSG_RESUME);

 Platform_finish:
	platform_resume_finish(state);
	return error;
}
Example #20
0
/* Function: mesh
 * 
 * Description: Reads in 3D feature points, contours, and camera coefficients 
 *     from file. Converts the 3D feature points to PCA space, creates a thin
 *     plate spline over a 3D grid, and converts the mesh back to the original
 *     space. Using the camera coefficients to project the points of the mesh
 *     to 2D, we then set only points that lie within the input contours as
 *     valid. The resulting valid mesh points are then written to file for each
 *     input contour.
 * 
 * Parameters:
 *     features3DFilename: filename of the 3D features
 *     contoursFilename: filename of the vertices for each contour
 *     cameraCoefficientsFilename: filename of the camera coefficients
 *     meshPointsFilenames: filenames of the files to write each contour mesh
 *         points to
 *     numMeshFiles: number of mesh point files (must be same as number of contours)
 *     regularization: smoothing parameter for the TPS calculations
 *     errorMessage: string to output an error message to, on error
 * 
 * Returns: 0 on success, 1 on error.
 * 
 * NOTE(review): on the error paths below the function returns without freeing
 * resources acquired earlier (features3D, contourStorage, cameraCoefficients,
 * per-contour buffers). A complete goto-based cleanup would require knowing
 * what each read*FromInputFile() leaves allocated on partial failure — TODO
 * confirm those contracts before restructuring.
 */
int mesh(
    _In_ char *features3DFilename,
    _In_ char *contoursFilename, 
    _In_ char *cameraCoefficientsFilename,
    _In_ char **meshPointsFilenames,
    _In_ int numMeshFiles,
    _In_ double regularization,
    _Out_ char *errorMessage)
{    
    // variable to store error status returned from functions
    int status;
    
    int numContours;
    CvPoint3D32f **features3D;    
    int *numFeaturesInContours;
    
    // read the input triangulated 3D features from file
    status = read3DFeaturesFromInputFile(&features3D, &numFeaturesInContours, &numContours, features3DFilename);
    
    if (status == INPUT_FILE_OPEN_ERROR)
    {
        sprintf(errorMessage, "Could not open 3D features file.");
        return 1;
    }
    
    if (status == INVALID_NUM_CONTOURS_ERROR)
    {
        sprintf(errorMessage, "At least 1 contour region required.");
        return 1;
    }
    
    if (status == INCORRECT_INPUT_FILE_FORMAT_ERROR)
    {
        sprintf(errorMessage, "3D features file has incorrect format.");
        return 1;
    } 
    
    if (status == OUT_OF_MEMORY_ERROR)
    {
        sprintf(errorMessage, "Out of memory error.");
        return 1;
    }
    
    if (numContours != numMeshFiles)
    {
        sprintf(errorMessage, "Number of contours passed into function and read in from 3D features file must match.");
        return 1;
    }
    
    CvSeq* contours;
    CvMemStorage *contourStorage = cvCreateMemStorage(0);
        
    if (contourStorage == NULL)
    {
        sprintf(errorMessage, "Out of memory error.");
        return 1;
    }
    
    int numContoursFromContourFile;
    
    // read the input region of interest contours from file
    status = readContourVerticesFromInputFile(&contours, contourStorage, &numContoursFromContourFile, contoursFilename);
    
    if (status == INPUT_FILE_OPEN_ERROR)
    {
        sprintf(errorMessage, "Could not open contour vertices file.");
        return 1;
    }
    
    if (status == INCORRECT_INPUT_FILE_FORMAT_ERROR)
    {
        sprintf(errorMessage, "Contour vertices file has incorrect format.");
        return 1;
    }
    
    if (numContours != numContoursFromContourFile)
    {
        sprintf(errorMessage, "Number of contours in contour vertices file and 3D features file must match.");
        return 1;
    }
    
    double **cameraCoefficients;
    int numCameras;
    
    // get the number of cameras and 11 camera coefficients for each camera from 
    // file
    status = readCoefficientsFromInputFile(&cameraCoefficients, &numCameras, cameraCoefficientsFilename);
    
    if (status == INPUT_FILE_OPEN_ERROR)
    {
        sprintf(errorMessage, "Could not open camera coefficients file.");
        return 1;
    }
    
    if (status == INCORRECT_INPUT_FILE_FORMAT_ERROR)
    {
        sprintf(errorMessage, "Camera coefficients file has incorrect format.");
        return 1;
    }
    
    if (status == INCORRECT_NUM_CAMERAS_ERROR)
    {
        sprintf(errorMessage, "At least 2 cameras are required for triangulation.");
        return 1;
    }    
    
    if (status == OUT_OF_MEMORY_ERROR)
    {
        sprintf(errorMessage, "Out of memory error.");
        return 1;
    }
    
    // per-contour output buffers; rows are filled in one at a time inside the
    // loop below
    CvPoint3D32f **features3DGrid = (CvPoint3D32f **)malloc(numContours * sizeof(CvPoint3D32f *));
    char **validFeatureIndicator = (char **)malloc(numContours * sizeof(char *));    
    
    if (features3DGrid == NULL || validFeatureIndicator == NULL)
    {
        sprintf(errorMessage, "Out of memory error.");
        return 1;
    }
    
    int *numGridPoints = (int *)malloc(numContours * sizeof(int));
    int *numGridPointsInContours = (int *)malloc(numContours * sizeof(int));

    if (numGridPoints == NULL || numGridPointsInContours == NULL)
    {
        sprintf(errorMessage, "Out of memory error.");
        return 1;
    }
    
    CvSeq *contour = contours;
    int k = 0;
    
    // for each contour, calculate the TPS for the feature points
    while (contour)
    {        
        CvPoint3D32f *features3DPrime = (CvPoint3D32f *)malloc(numFeaturesInContours[k] * sizeof(CvPoint3D32f));    
        
        // BUGFIX: this allocation was previously unchecked; a NULL result
        // would have been dereferenced inside PCA() below
        if (features3DPrime == NULL)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1;
        }
        
        Data PCAData;
        
        if (createPCA(&PCAData, 3, numFeaturesInContours[k]) == OUT_OF_MEMORY_ERROR)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1;
        }
        
        // convert the input points to PCA space, and store in features3DPrime
        status = PCA(features3DPrime, features3D[k], numFeaturesInContours[k], &PCAData);
        
        if (status == OUT_OF_MEMORY_ERROR)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1;
        }
        
        double xPrimeMax, yPrimeMax, xPrimeMin, yPrimeMin;
        
        // get the max and min x and y values of features3DPrime, so that we can create
        // a grid of appropriate size that will encompass the features
        getXPrimeYPrimeMaxMin(features3DPrime, numFeaturesInContours[k], &xPrimeMax, &yPrimeMax, &xPrimeMin, &yPrimeMin);
        
        // multiply the ranges of x and y by 3 to create grid size
        // NOTE(review): if the PCA x or y range is below 1/3, these truncate
        // to 0 and the grid is empty — TODO confirm whether that degenerate
        // case can occur for valid input
        int gridWidth = (int) ((xPrimeMax - xPrimeMin) * 3);
        int gridHeight = (int) ((yPrimeMax - yPrimeMin) * 3);
        
        double **grid = (double **)malloc(gridWidth * sizeof(double *));
        
        if (grid == NULL)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1;
        }
        
        for (int i = 0; i < gridWidth; i++)
        {
            grid[i] = (double *)malloc(gridHeight * sizeof(double));
            
            if (grid[i] == NULL)
            {
                // BUGFIX: release the rows allocated so far (and the row
                // array) instead of leaking them on this error path
                for (int j = 0; j < i; j++)
                {
                    free(grid[j]);
                }
                free(grid);
                sprintf(errorMessage, "Out of memory error.");
                return 1;
            }
        }
        
        // since the grid row and column indices start at 0 because it is an array,
        // but the actual x and y start values do not necessarily start 0, we find
        // the actual start values of x and y. This is the middle point between max
        // and min of x or y, minus half the width or height of the grid
        int gridWidthStartIndex = (int)(((xPrimeMax + xPrimeMin)/2) - (gridWidth/2));
        int gridHeightStartIndex = (int)(((yPrimeMax + yPrimeMin)/2) - (gridHeight/2));
        
        // perform thin plate spline calculations to find the smoothed z coordinates
        // for every point in the grid
        status = TPS(features3DPrime, numFeaturesInContours[k], grid, gridHeight, gridWidth, gridHeightStartIndex, gridWidthStartIndex, regularization);
        
        if (status == NOT_ENOUGH_POINTS_ERROR)
        {
            sprintf(errorMessage, "At least 3 valid feature points are required to define a plane for thin sheet spline function.");
            return 1;
        }
        
        if (status == OUT_OF_MEMORY_ERROR)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1;
        }
        
        numGridPoints[k] = gridHeight * gridWidth;
        
        CvPoint3D32f *features3DGridPrime = (CvPoint3D32f *)malloc(numGridPoints[k] * sizeof(CvPoint3D32f));
        features3DGrid[k] = (CvPoint3D32f *)malloc(numGridPoints[k] * sizeof(CvPoint3D32f));
        
        if (features3DGridPrime == NULL || features3DGrid[k] == NULL)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1;
        }
        
        // transform the grid to CvPoint3D32f points (flattened with stride
        // gridWidth: index = gridWidth*j + i)
        for (int i = 0; i < gridWidth; i++)
        {
            for (int j = 0; j < gridHeight; j++)
            {            
                int curIndex = gridWidth*j + i;
                
                features3DGridPrime[curIndex].x = (double) i+gridWidthStartIndex;
                features3DGridPrime[curIndex].y = (double) j+gridHeightStartIndex;
                features3DGridPrime[curIndex].z = grid[i][j];
            }
        }
        
        // convert the grid points from PCA space back to original space, and store
        // the points in CvPoint3D32f array features3DGrid
        status = reversePCA(features3DGrid[k], features3DGridPrime, numGridPoints[k], &PCAData);
        
        if (status == OUT_OF_MEMORY_ERROR)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1; 
        }
        
        CvPoint2D32f *idealFeatures2D = (CvPoint2D32f *)malloc(numGridPoints[k] * sizeof(CvPoint2D32f));
        
        if (idealFeatures2D == NULL)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1;
        }    
        
        // project the 3d grid points to 2D space
        calculateIdealFeatures(idealFeatures2D, features3DGrid[k], numGridPoints[k], cameraCoefficients[0]);
        
        validFeatureIndicator[k] = (char *)malloc(numGridPoints[k] * sizeof(char));
        
        if (validFeatureIndicator[k] == NULL)
        {
            sprintf(errorMessage, "Out of memory error.");
            return 1;
        }
        
        // test each 2d grid point to see if it lies within the input contours, and
        // set the corresponding values of validFeatureIndicator accordingly
        numGridPointsInContours[k] = areFeaturesInContour(idealFeatures2D, numGridPoints[k], validFeatureIndicator[k], contour);
        
        contour = (CvSeq *)(contour->h_next);
        k++;
        
        // release the per-iteration scratch buffers before the next contour
        free(features3DPrime);
        destroyPCA(&PCAData);
    
        for (int i = 0; i < gridWidth; i++)
        {
            free(grid[i]);
        }
    
        free(grid);
        free(features3DGridPrime);
        free(idealFeatures2D);        
    }    
    
    // print the valid 3D mesh points to files for each contour
    writeGridPointsToFile(meshPointsFilenames, features3DGrid, numGridPoints, validFeatureIndicator, numGridPointsInContours, numContours);     
    
    // cleanup
    for (int i = 0; i < numContours; i++)
    {
        free(features3D[i]);
        free(features3DGrid[i]);
        free(validFeatureIndicator[i]);
    }
    
    free(features3D);    
    free(features3DGrid);    
    free(validFeatureIndicator);
    free(numGridPoints);
    free(numGridPointsInContours);
    
    cvReleaseMemStorage(&contourStorage);
    
    for (int i = 0; i < numCameras; i++)
    {
        free(cameraCoefficients[i]);
    }
    
    free(cameraCoefficients);
               
    return 0;
}