Example #1
/*
 * wait 1 second (HZ) for automatic wake locks to time out,
 * or report -EBUSY if a static suspend lock is active
 */
static int wait_timed_wakelocks(void)
{
	unsigned long end_time = jiffies + HZ;
	long max_wait;
	int ret;
	
	do {
		ret = 0;
		max_wait = has_wake_lock(WAKE_LOCK_SUSPEND);

		pr_info("wait_timed_wakelocks: max_wait:%li\n", 
				max_wait);
		
		/* no active lock, exit successfully */
		if (!max_wait)
			break;
		
		/* static lock active, abort suspend */
		if (max_wait < 0) {
			ret = -EBUSY;
			break;
		}
		
		if (time_after(jiffies + max_wait, end_time)) {
			ret = -EAGAIN;
			break;
		}
		
		yield();
		
	} while (true);
	
	return ret;
}
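
A minimal caller sketch, assuming it lives in the same file as wait_timed_wakelocks() above; example_platform_suspend() is a hypothetical name used only to illustrate the three return codes:

/* Hypothetical caller, for illustration only. */
static int example_platform_suspend(void)
{
	int ret = wait_timed_wakelocks();

	if (ret == -EBUSY)	/* a no-timeout suspend lock is held */
		return ret;
	if (ret == -EAGAIN)	/* timed locks outlive the 1 s budget */
		return ret;

	/* all suspend wake locks have timed out; safe to continue */
	return 0;
}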
void cpuidle_idle_management(void)
{
	/* go to deep sleep if a handler is registered, else enter idle */
	unsigned int ret = 1;
	unsigned long flags = 0;
	unsigned int oldslice = 0;
	unsigned int newslice = 0;

	/* coverity[lock_acquire] */
	local_irq_save(flags);

	/* record the time of low-power entry */
	oldslice = bsp_get_slice_value();
	/*lint --e{18,718,746}*/
	if (0 == has_wake_lock(0)) {	/* type 0: WAKE_LOCK_SUSPEND */
		if (idle_go_deepsleep)
			ret = idle_go_deepsleep();	/*lint !e732*/
		update_awake_time_stamp();
	} else {
		check_awake_time_limit();
	}
	if (ret)
		hi6930_enter_idle();

	/* record the time of WFI exit */
	newslice = bsp_get_slice_value();

	g_ulDfsCcpuIdleTime += get_timer_slice_delta(oldslice, newslice);
	g_ulDfsCcpuIdleTime_long += get_timer_slice_delta(oldslice, newslice);

	local_irq_restore(flags);
}
Example #3
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
#ifdef CONFIG_EARLYSUSPEND
	suspend_state_t state = PM_SUSPEND_ON;
#else
	suspend_state_t state = PM_SUSPEND_STANDBY;
#endif
	const char * const *s;
#endif
	char *p;
	int len;
	int error = -EINVAL;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* First, check if we are requested to hibernate */
	if (len == 4 && !strncmp(buf, "disk", len)) {
		error = hibernate();
		goto Exit;
	}

#ifdef CONFIG_SUSPEND
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
			break;
	}
	if (state < PM_SUSPEND_MAX && *s)
#if defined(CONFIG_EARLYSUSPEND) && defined(CONFIG_HAS_WAKELOCK)
		if (state == PM_SUSPEND_ON || valid_state(state)) {
			error = 0;
			request_suspend_state(state);
		}
#elif defined(CONFIG_HAS_WAKELOCK)
	{
		/* Release the main lock in the hope of going to sleep */
		wake_unlock(&main_wake_lock);

		if (!has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wake_timer.data = current;
			/* prevent us from freezing indefinitely */
			mod_timer(&wake_timer, jiffies + HZ * 5);

			set_freeze_flag(current);
			try_to_freeze();

			del_timer(&wake_timer);
		}

		/* Re-grab the main lock so that we do not go to sleep again */
		wake_lock(&main_wake_lock);
	}
#else
		error = enter_state(state);
#endif
#endif

 Exit:
	return error ? error : n;
}
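
For reference, a minimal userspace sketch of how this handler is reached, assuming a POSIX environment and the standard /sys/power/state interface; request_sleep_state() is a hypothetical helper:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: write a sleep-state name to /sys/power/state. */
static int request_sleep_state(const char *state)
{
	int fd = open("/sys/power/state", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, state, strlen(state));	/* e.g. "mem\n" or "disk\n" */
	close(fd);
	return n < 0 ? -1 : 0;
}

Calling request_sleep_state("mem\n") reaches state_store() above with buf = "mem\n" and n = 4.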
/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
    int error;
    bool wakeup = false;

    if (!suspend_ops)
        return -ENOSYS;

    trace_machine_suspend(state);
    if (suspend_ops->begin) {
        error = suspend_ops->begin(state);
        if (error)
            goto Close;
    }
    suspend_console();
    suspend_test_start();
    error = dpm_suspend_start(PMSG_SUSPEND);
    /*OPPO 2012-11-27 zhzhyon Add for reason*/
#ifdef CONFIG_VENDOR_EDIT
    if (has_wake_lock(WAKE_LOCK_SUSPEND))
        goto Resume_devices;
#endif
    /*OPPO 2012-11-27 zhzhyon Add end*/
    if (error) {
        printk(KERN_ERR "PM: Some devices failed to suspend\n");
        goto Recover_platform;
    }
    suspend_test_finish("suspend devices");
    if (suspend_test(TEST_DEVICES))
        goto Recover_platform;

    do {
        error = suspend_enter(state, &wakeup);
    } while (!error && !wakeup
             && suspend_ops->suspend_again && suspend_ops->suspend_again());

Resume_devices:
    suspend_test_start();
    dpm_resume_end(PMSG_RESUME);
    suspend_test_finish("resume devices");
    resume_console();
Close:
    if (suspend_ops->end)
        suspend_ops->end();
    trace_machine_suspend(PWR_EVENT_EXIT);
    return error;

Recover_platform:
    if (suspend_ops->recover)
        suspend_ops->recover();
    goto Resume_devices;
}
Example #5
/**
 *	suspend_enter - enter the desired system sleep state.
 *	@state:		state to enter
 *
 *	This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state)
{
	int error;

	if (suspend_ops->prepare) {
		error = suspend_ops->prepare();
		if (error)
			return error;
	}

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		printk(KERN_ERR "PM: Some devices failed to power down\n");
		goto Platform_finish;
	}

	if (suspend_ops->prepare_late) {
		error = suspend_ops->prepare_late();
		if (error)
			goto Power_up_devices;
	}

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = disable_nonboot_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	error = _suspend_enter(state);
#ifdef CONFIG_QUICK_WAKEUP
	/*
	 * Retry suspend while the quick-wakeup handlers report no pending
	 * work and no suspend wake lock has been taken.
	 */
	while (!error && !quickwakeup_execute()) {
		if (has_wake_lock(WAKE_LOCK_SUSPEND))
			break;
		error = _suspend_enter(state);
	}
#endif

 Enable_cpus:
	enable_nonboot_cpus();

 Platform_wake:
	if (suspend_ops->wake)
		suspend_ops->wake();

 Power_up_devices:
	dpm_resume_noirq(PMSG_RESUME);

 Platform_finish:
	if (suspend_ops->finish)
		suspend_ops->finish();

	return error;
}
Example #6
static int print_thread(void *data)
{
	while (1) {
		wake_lock(&messages_wakelock);
		if (print_thread_enable)
			print_debug_info();
		has_wake_lock(WAKE_LOCK_SUSPEND);	/* return value ignored */
		msleep(100);
		wake_unlock(&messages_wakelock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(print_thread_interval * HZ);
	}
	return 0;
}
static int print_thread(void *data)
{
	while (1) {
		wake_lock(&messages_wakelock);
		print_ahb();
		print_gr();
		print_ana();
		is_dsp_sleep();
		has_wake_lock(WAKE_LOCK_SUSPEND);	/* return value ignored */
		msleep(100);
		wake_unlock(&messages_wakelock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(30 * HZ);
	}
	return 0;
}
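
Both variants above are kernel-thread bodies; a plausible start-up sketch, assuming the usual kthread API (print_task and the thread name are hypothetical):

#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *print_task;	/* hypothetical holder */

static int __init print_thread_init(void)
{
	print_task = kthread_run(print_thread, NULL, "print_thread");
	return IS_ERR(print_task) ? PTR_ERR(print_task) : 0;
}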
Example #8
/**
 *	suspend_enter - enter the desired system sleep state.
 *	@state:		state to enter
 *
 *	This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state)
{
	int error = 0;
	device_pm_lock();
	error = _suspend_enter(state);

#ifdef CONFIG_QUICK_WAKEUP
	/*
	 * Retry suspend while the quick-wakeup handlers report no pending
	 * work and no suspend wake lock has been taken.
	 */
	while (!error && !quickwakeup_execute()) {
		if (has_wake_lock(WAKE_LOCK_SUSPEND))
			break;
		error = _suspend_enter(state);
	}
#endif
	device_pm_unlock();
	return error;
}
Example #9
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	unsigned int wakeup = 0;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = 1;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
					sig_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
					"(%d tasks refusing to freeze):\n",
					elapsed_csecs / 100, elapsed_csecs % 100, todo);
		}
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
				elapsed_csecs > 100)
				sched_show_task(p);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Example #10
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	unsigned int wakeup = 0;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = 1;
			break;
		}
		if (time_after(jiffies, end_time))
			break;
	} while (todo);

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
				"(%d tasks refusing to freeze):\n",
				wakeup ? "aborted" : "failed",
				elapsed_csecs / 100, elapsed_csecs % 100, todo);
		if (!wakeup)
			show_state();
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
							elapsed_csecs > 100)
				printk(KERN_ERR " %s\n", p->comm);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Example #11
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler lock, it's
			 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
			 * transition can't race with task state testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
					user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p) &&
				    elapsed_csecs > 100)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
Example #12
static int hisik3_pm_enter(suspend_state_t state)
{
	unsigned long flags = 0;

	switch (state) {
		case PM_SUSPEND_STANDBY:
		case PM_SUSPEND_MEM:
			break;
		default:
			return -EINVAL;
	}

	if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
		printk("hisik3_pm_enter has wake lock\n");
		return -EAGAIN;
	}

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	local_irq_save(flags);

	hisik3_pm_save_gic();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_CACHE_L2X0
	hisik3_pm_disable_l2x0();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/*set pmu to low power*/
	pmulowpower(1);

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/* workaround: delay 40ms to make sure LDO0 is powered off cleanly */
	mdelay(40);

	/* protect timer0_0 timer0_1 and disable timer0 clk */
	protect_timer0_register();

#ifdef CONFIG_LOWPM_DEBUG
	/*set io to lowpower mode*/
	ioshowstatus(1);
	setiolowpower();
	ioshowstatus(1);

	/*set pmu to low power mode*/
	pmulowpower_show(1);
	pmulowpowerall(1);
	pmulowpower_show(1);
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	edb_putstr("[PM]Enter hilpm_cpu_godpsleep...\r\n");

#ifdef CONFIG_LOWPM_DEBUG
	/*time enable*/
	timer0_0_enable();

	/*rtc*/
	rtc_enable();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	hilpm_cpu_godpsleep();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/*
	 *the status has been changed in fastboot,
	 *it causes difference with kernel's status,
	 */
	pmctrl_reinit();
	pctrl_reinit();
	sysctrl_reinit();

	/*uart init.*/
	edb_reinit();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_LOWPM_DEBUG
	/*restore debug uart0*/
	debuguart_reinit();

	/*disable timer0*/
	timer0_0_disable();

	/*restore pmu config*/
	pmulowpowerall(0);
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/*PMU regs restore*/
	pmulowpower(0);

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/* restore timer0_0 timer0_1 and enable timer0 clk */
	restore_timer0_register();

	flush_cache_all();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_CACHE_L2X0
	hisik3_pm_enable_l2x0();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	hisik3_pm_retore_gic();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	local_irq_restore(flags);

	pr_notice("[PM]Restore OK.\r\n");

	return 0;
}
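
The in_atomic() check above repeats a dozen times; a sketch of a helper that would collapse those call sites, assuming the same pr_warn() format (PM_WARN_IF_ATOMIC is a hypothetical name):

/* Hypothetical helper collapsing the repeated atomic-context checks. */
#define PM_WARN_IF_ATOMIC()						\
	do {								\
		if (unlikely(in_atomic()))				\
			pr_warn("PM: in atomic[%08x] at %d\n",		\
				preempt_count(), __LINE__);		\
	} while (0)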
Example #13
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
					user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_msecs / 1000, elapsed_msecs % 1000,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
#ifdef CONFIG_MSM_WATCHDOG
			msm_watchdog_suspend(NULL);
#endif
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p) &&
				    elapsed_msecs > 1000)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
#ifdef CONFIG_MSM_WATCHDOG
			msm_watchdog_resume(NULL);
#endif
		}
	} else {
Example #14
/*
 * Put CPU in low power mode.
 */
void arch_idle(void)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	uint32_t sleep_limit = SLEEP_LIMIT_NONE;

	int64_t timer_expiration;
	int latency_qos;
	int ret;
	int i;
	unsigned int cpu;
	int64_t t1;
	static DEFINE_PER_CPU(int64_t, t2);
	int exit_stat;

	if (!atomic_read(&msm_pm_init_done))
		return;

	cpu = smp_processor_id();
	latency_qos = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	/* get the next timer expiration */
	timer_expiration = ktime_to_ns(tick_nohz_get_sleep_length());

	t1 = ktime_to_ns(ktime_get());
	msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - __get_cpu_var(t2));
	msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, timer_expiration);
	exit_stat = MSM_PM_STAT_IDLE_SPIN;

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		allow[i] = true;

	if (num_online_cpus() > 1 ||
		(timer_expiration < msm_pm_idle_sleep_min_time) ||
#ifdef CONFIG_HAS_WAKELOCK
		has_wake_lock(WAKE_LOCK_IDLE) ||
#endif
		!msm_pm_irq_extns->idle_sleep_allowed()) {
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
	}

	for (i = 0; i < ARRAY_SIZE(allow); i++) {
		struct msm_pm_platform_data *mode =
					&msm_pm_modes[MSM_PM_MODE(cpu, i)];
		if (!mode->idle_supported || !mode->idle_enabled ||
			mode->latency >= latency_qos ||
			mode->residency * 1000ULL >= timer_expiration)
			allow[i] = false;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		uint32_t wait_us = CONFIG_MSM_IDLE_WAIT_ON_MODEM;
		while (msm_pm_modem_busy() && wait_us) {
			if (wait_us > 100) {
				udelay(100);
				wait_us -= 100;
			} else {
				udelay(wait_us);
				wait_us = 0;
			}
		}

		if (msm_pm_modem_busy()) {
			allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
			allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]
				= false;
		}
	}

	MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
		"%s(): latency qos %d, next timer %lld, sleep limit %u\n",
		__func__, latency_qos, timer_expiration, sleep_limit);

	for (i = 0; i < ARRAY_SIZE(allow); i++)
		MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
			"%s(): allow %s: %d\n", __func__,
			msm_pm_sleep_mode_labels[i], (int)allow[i]);

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
		allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		/* Sync the timer with SCLK; this is needed only for the
		 * modem-assisted power collapse case.
		 */
		int64_t next_timer_exp = msm_timer_enter_idle();
		uint32_t sleep_delay;
		bool low_power = false;

		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			next_timer_exp, MSM_PM_SLEEP_TICK_LIMIT);

		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
			sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;

#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_ACTIVE)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_RETENTION)
		sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
#endif

		ret = msm_pm_power_collapse(true, sleep_delay, sleep_limit);
		low_power = (ret != -EBUSY && ret != -ETIMEDOUT);
		msm_timer_exit_idle(low_power);

		if (ret)
			exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
		else {
			exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
			msm_pm_sleep_limit = sleep_limit;
		}
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		ret = msm_pm_power_collapse_standalone(true);
		exit_stat = ret ?
			MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE :
			MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
	} else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
		ret = msm_pm_swfi(true);
		if (ret)
			while (!msm_pm_irq_extns->irq_pending())
				udelay(1);
		exit_stat = ret ? MSM_PM_STAT_IDLE_SPIN : MSM_PM_STAT_IDLE_WFI;
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		msm_pm_swfi(false);
		exit_stat = MSM_PM_STAT_IDLE_WFI;
	} else {
		while (!msm_pm_irq_extns->irq_pending())
			udelay(1);
		exit_stat = MSM_PM_STAT_IDLE_SPIN;
	}

	__get_cpu_var(t2) = ktime_to_ns(ktime_get());
	msm_pm_add_stat(exit_stat, __get_cpu_var(t2) - t1);
}
Example #15
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler lock, it's
			 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
			 * transition can't race with task state testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p) &&
				    elapsed_msecs > 1000)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
Example #16
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;
#ifdef CONFIG_SHSYS_CUST
	struct timespec tu;
#endif

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!sig_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!sig_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
#ifdef CONFIG_SHSYS_CUST
		tu.tv_sec = 0;
		tu.tv_nsec = 10000000;
		hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
#else
		msleep(10);
#endif
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
					sig_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}
		thaw_workqueues();

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
				elapsed_csecs > 100)
				sched_show_task(p);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
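Example #17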
static int suspend(bool do_deepsleep)
{
	bool pins_force = pins_suspend_force_mux && pins_suspend_force;
	int ret = 0;

	if (sleep_is_blocked()) {
		pr_info("suspend/resume: interrupted by modem(%d) or event(%d)\n",
			atomic_read(&block_sleep_modem),
			atomic_read(&block_sleep_event));
		return -EBUSY;
	}

	if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
		pr_info("suspend/resume: wakelock has been locked!\n");
		return -EBUSY;
	}

	nmk_gpio_clocks_enable();

	ux500_suspend_dbg_add_wake_on_uart();
	nmk_gpio_wakeups_suspend();

	/* configure the prcm for a sleep wakeup */
	prcmu_enable_wakeups(PRCMU_WAKEUP(ABB) | PRCMU_WAKEUP(RTC));

	ux500_rtcrtt_next_seconds(alarm_sec);

	context_vape_save();

	if (pins_force) {
		/*
		 * Save GPIO settings before applying power save
		 * settings
		 */
		context_gpio_save();

		/* Apply GPIO power save mux settings */
		context_gpio_mux_safe_switch(true);
		pins_suspend_force_mux();
		context_gpio_mux_safe_switch(false);

		/* Apply GPIO power save settings */
		pins_suspend_force();
	}

	ux500_pm_gic_decouple();

	if (ux500_pm_gic_pending_interrupt()) {
		pr_info("suspend/resume: pending interrupt\n");

		/* Recouple GIC with the interrupt bus */
		ux500_pm_gic_recouple();
		ret = -EBUSY;

		goto exit;
	}
	ux500_pm_prcmu_set_ioforce(true);

	if (do_deepsleep) {
		context_varm_save_common();
		context_varm_save_core();
		context_gic_dist_disable_unneeded_irqs();
		context_save_cpu_registers();

		/*
		 * Because we have only 100us between requesting a power state
		 * and wfi, we also clean the cache beforehand so that the
		 * final cache clean before wfi has as little as possible to
		 * do.
		 */
		context_clean_l1_cache_all();

		(void) prcmu_set_power_state(PRCMU_AP_DEEP_SLEEP,
					     false, false);
		context_save_to_sram_and_wfi(true);

		context_restore_cpu_registers();
		context_varm_restore_core();
		context_varm_restore_common();

	} else {

		context_clean_l1_cache_all();
		(void) prcmu_set_power_state(APEXECUTE_TO_APSLEEP,
					     false, false);
		dsb();
		__asm__ __volatile__("wfi\n\t" : : : "memory");
	}

	context_vape_restore();

	/* If GPIO woke us up then save the pins that caused the wake up */
	ux500_pm_gpio_save_wake_up_status();

	ux500_suspend_dbg_sleep_status(do_deepsleep);

	/* APE was turned off, restore IO ring */
	ux500_pm_prcmu_set_ioforce(false);

exit:
	if (pins_force) {
		/* Restore gpio settings */
		context_gpio_mux_safe_switch(true);
		context_gpio_restore_mux();
		context_gpio_mux_safe_switch(false);
		context_gpio_restore();
	}

	ux500_rtcrtt_off();

	/* This is what cpuidle wants */
	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
			     PRCMU_WAKEUP(ABB));

	nmk_gpio_wakeups_resume();
	ux500_suspend_dbg_remove_wake_on_uart();

	nmk_gpio_clocks_disable();

	return ret;
}
Example #18
static int suspend(bool do_deepsleep)
{
	bool pins_force = pins_suspend_force_mux && pins_suspend_force;
	int ret = 0;
	u32 pending_irq;

	if (suspend_sleep_is_blocked()) {
		pr_info("suspend/resume: interrupted by modem.\n");
		return -EBUSY;
	}

	if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
		pr_info("suspend/resume: aborted. wakelock has been taken.\n");
		return -EBUSY;
	}

	nmk_gpio_clocks_enable();

	ux500_suspend_dbg_add_wake_on_uart();

	nmk_gpio_wakeups_suspend();

	/* configure the prcm for a sleep wakeup */
	prcmu_enable_wakeups(suspend_wakeups);

	ux500_suspend_dbg_test_set_wakeup();

	context_vape_save();

	if (pins_force) {
		/*
		 * Save GPIO settings before applying power save
		 * settings
		 */
		context_gpio_save();

		/* Apply GPIO power save mux settings */
		context_gpio_mux_safe_switch(true);
		pins_suspend_force_mux();
		context_gpio_mux_safe_switch(false);

		/* Apply GPIO power save settings */
		pins_suspend_force();
	}

	ux500_pm_gic_decouple();

	/* Copy GIC interrupt settings to PRCMU interrupt settings */
	ux500_pm_prcmu_copy_gic_settings();

	if (ux500_pm_gic_pending_interrupt()) {
		pr_info("suspend/resume: pending interrupt gic\n");

		/* Recouple GIC with the interrupt bus */
		ux500_pm_gic_recouple();
		ret = -EBUSY;

		goto exit;
	}

	if (ux500_pm_prcmu_pending_interrupt(&pending_irq)) {
		pr_info("suspend/resume: pending interrupt prcmu: %u\n",
				pending_irq);

		/* Recouple GIC with the interrupt bus */
		ux500_pm_gic_recouple();
		ret = -EBUSY;

		goto exit;
	}
	ux500_pm_prcmu_set_ioforce(true);

	if (do_deepsleep) {
		context_varm_save_common();
		context_varm_save_core();
		context_gic_dist_disable_unneeded_irqs();
		context_save_cpu_registers();

		/*
		 * Because we have only 100us between requesting a power state
		 * and wfi, we also clean the cache beforehand so that the
		 * final cache clean before wfi has as little as possible to
		 * do.
		 */
		context_clean_l1_cache_all();

		(void) prcmu_set_power_state(PRCMU_AP_DEEP_SLEEP,
					     false, false);
		context_save_to_sram_and_wfi(true);

		context_restore_cpu_registers();
		context_varm_restore_core();
		context_varm_restore_common();

	} else {

		context_clean_l1_cache_all();
		(void) prcmu_set_power_state(PRCMU_AP_SLEEP,
					     false, false);
		dsb();
		__asm__ __volatile__("wfi\n\t" : : : "memory");
	}

	context_vape_restore();

	/* If GPIO woke us up then save the pins that caused the wake up */
	ux500_pm_gpio_save_wake_up_status();

	ux500_suspend_dbg_sleep_status(do_deepsleep);

	/* APE was turned off, restore IO ring */
	ux500_pm_prcmu_set_ioforce(false);

exit:
	if (pins_force) {
		/* Restore gpio settings */
		context_gpio_mux_safe_switch(true);
		context_gpio_restore_mux();
		context_gpio_mux_safe_switch(false);
		context_gpio_restore();
	}

	/* Configure the prcmu with the wake-ups that cpuidle needs */
	prcmu_enable_wakeups(running_wakeups);

	nmk_gpio_wakeups_resume();

	ux500_suspend_dbg_remove_wake_on_uart();

	nmk_gpio_clocks_disable();

	return ret;
}
Example #19
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
					user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p) &&
				    elapsed_csecs > 100)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
Example #20
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
    int error;

    if (suspend_ops->prepare) {
        error = suspend_ops->prepare();
        if (error)
            goto Platform_finish;
    }

    error = dpm_suspend_end(PMSG_SUSPEND);
    if (error) {
        printk(KERN_ERR "PM: Some devices failed to power down\n");
        goto Platform_finish;
    }

    if (suspend_ops->prepare_late) {
        error = suspend_ops->prepare_late();
        if (error)
            goto Platform_wake;
    }

    if (suspend_test(TEST_PLATFORM))
        goto Platform_wake;

    error = disable_nonboot_cpus();
    if (error || suspend_test(TEST_CPUS))
        goto Enable_cpus;

    arch_suspend_disable_irqs();
    BUG_ON(!irqs_disabled());

    error = syscore_suspend();
    if (!error) {
        *wakeup = pm_wakeup_pending();
        if (!(suspend_test(TEST_CORE) || *wakeup)) {
            /*OPPO 2012-11-27 zhzhyon Add for headset detect*/
#ifdef CONFIG_VENDOR_EDIT
            if (has_wake_lock(WAKE_LOCK_SUSPEND))
                goto Resume_devices;
#endif
            /*OPPO 2012-11-27 zhzhyon Add end*/

            error = suspend_ops->enter(state);
            events_check_enabled = false;
        }
        /*OPPO 2012-11-27 zhzhyon Add for headset detect*/
#ifdef CONFIG_VENDOR_EDIT
Resume_devices:
#endif
        /*OPPO 2012-11-27 zhzhyon Add end*/
        syscore_resume();
    }

    arch_suspend_enable_irqs();
    BUG_ON(irqs_disabled());

Enable_cpus:
    enable_nonboot_cpus();

Platform_wake:
    if (suspend_ops->wake)
        suspend_ops->wake();

    dpm_resume_start(PMSG_RESUME);

Platform_finish:
    if (suspend_ops->finish)
        suspend_ops->finish();

    return error;
}