static int __devinit ram_console_probe_oneplus(struct platform_device *pdev)
{
    INIT_WORK(&(optimize_data.work), optimize_ramconsole_oneplus_func);
    optimize_data.pdev = pdev;
    schedule_work_on(cpu_is_offline(2) ? 0 : 2, &(optimize_data.work));
    return 0;
}
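Every example on this page is built around the same call: initialize a struct work_struct, then queue it on a chosen CPU with schedule_work_on() so the handler runs later in process context on that CPU. Below is a minimal, self-contained sketch of that pattern as a throwaway module; it is not taken from any of the drivers shown here, and the names demo_work/demo_fn and the preferred CPU are illustrative assumptions only.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Runs later in process context on the CPU it was queued on. */
static void demo_fn(struct work_struct *work)
{
	pr_info("demo work running on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/* Prefer CPU 1 if it is online, otherwise fall back to the first
	 * online CPU, similar in spirit to the cpu_is_offline() fallback
	 * used in the probe function above. */
	int cpu = cpu_online(1) ? 1 : cpumask_first(cpu_online_mask);

	schedule_work_on(cpu, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Make sure the handler has finished before the module goes away. */
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");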
Example #2
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}
Example #3
void lge_pm_handle_poweroff(void)
{
#if 1
	lge_pm_low_vbatt_notify();
#else
	schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
#endif
}
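Example #4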
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
					unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
		schedule_work_on(0, &performance_up_work);
		/* fall through: GOV_START also applies the frequency limits below */
	case CPUFREQ_GOV_LIMITS:
		pr_debug("setting to %u kHz because of event %u\n",
						policy->max, event);
		__cpufreq_driver_target(policy, policy->max,
						CPUFREQ_RELATION_H);
		break;
	case CPUFREQ_GOV_STOP:
		schedule_work_on(0, &performance_down_work);
		break;
	default:
		break;
	}
	return 0;
}
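Example #5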
void dual_boost(unsigned int boost_on)
{
	if (boost_on)
	{	
		if (is_dual_locked != 0)
			return;

#ifndef DUALBOOST_DEFERED_QUEUE
		cpu_hotplug_driver_lock();
		if (cpu_is_offline(NON_BOOT_CPU))
		{
			ssize_t ret;
			struct sys_device *cpu_sys_dev;
		
			ret = cpu_up(NON_BOOT_CPU); // it takes 60ms
			if (!ret)
			{
				cpu_sys_dev = get_cpu_sysdev(NON_BOOT_CPU);
				if (cpu_sys_dev)
				{
					kobject_uevent(&cpu_sys_dev->kobj, KOBJ_ONLINE);
					stall_mpdecision = 1;
				}
			}
		}
		cpu_hotplug_driver_unlock();
#else	
		if (cpu_is_offline(NON_BOOT_CPU))
			schedule_work_on(BOOT_CPU, &dvfs_hotplug_work);
#endif
		is_dual_locked = 1;
	}
	else
	{
		if (stall_mpdecision == 1)
		{
			struct sys_device *cpu_sys_dev;

#ifdef DUALBOOST_DEFERED_QUEUE
			flush_work(&dvfs_hotplug_work);
#endif
			cpu_hotplug_driver_lock();	
			cpu_sys_dev = get_cpu_sysdev(NON_BOOT_CPU);
			if (cpu_sys_dev)
			{
				kobject_uevent(&cpu_sys_dev->kobj, KOBJ_ONLINE);
				stall_mpdecision = 0;
			}
			cpu_hotplug_driver_unlock();
		}
		
		is_dual_locked = 0;
	}
}
Example #6
static void auto_hotplug_early_suspend(struct early_suspend *handler)
{
        pr_info("auto_hotplug: early suspend handler\n");
        flags |= EARLYSUSPEND_ACTIVE;

        /* Cancel all scheduled delayed work to avoid races */
        cancel_delayed_work_sync(&hotplug_offline_work);
        cancel_delayed_work_sync(&hotplug_decision_work);
        if (num_online_cpus() > 1) {
                pr_info("auto_hotplug: Offlining CPUs for early suspend\n");
                schedule_work_on(0, &hotplug_offline_all_work);
        }
}
static enum hrtimer_restart timer_func(struct hrtimer *handle)
{
	struct sleep_data *sleep_info = container_of(handle, struct sleep_data,
			timer);

	if (atomic_read(&sleep_info->timer_expired))
		pr_info("msm_sleep_stats: Missed timer interrupt on cpu %d\n",
				sleep_info->cpu);

	atomic_set(&sleep_info->timer_val_ms, 0);
	atomic_set(&sleep_info->timer_expired, 1);

	schedule_work_on(sleep_info->cpu, &sleep_info->work);

	return HRTIMER_NORESTART;
}
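The hrtimer callback above runs in hard-IRQ context, so the real work is pushed out through schedule_work_on() instead of being done in the timer itself. A stripped-down sketch of that handoff, again with hypothetical names (demo_timer, demo_work_fn) and an arbitrary 100 ms expiry, could look like this:

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct hrtimer demo_timer;
static struct work_struct demo_work;

/* Deferred part: runs later in process context, may sleep. */
static void demo_work_fn(struct work_struct *work)
{
	pr_info("deferred work on CPU %d\n", raw_smp_processor_id());
}

/* Timer part: hard-IRQ context, so only queue the work and return. */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	schedule_work_on(smp_processor_id(), &demo_work);
	return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");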
Example #8
void midas_tsp_request_qos(void *data)
{
	if (!work_pending(&flex_work))
		schedule_work_on(0, &flex_work);

	/* Guarantee that the bus runs at >= 266MHz */
	if (!pm_qos_request_active(&busfreq_qos))
		pm_qos_add_request(&busfreq_qos, PM_QOS_BUS_DMA_THROUGHPUT,
				   266000);
	else {
		cancel_delayed_work_sync(&busqos_work);
		pm_qos_update_request(&busfreq_qos, 266000);
	}

	/* Cancel the QoS request after 200 ms (HZ / 5) */
	schedule_delayed_work_on(0, &busqos_work, HZ / 5);
}
Example #9
static int min_online_cpus_fn_set(const char *arg, const struct kernel_param *kp)
{
    int ret; 
    
    ret = param_set_int(arg, kp);
    
    /* at least one core must stay online even if the set value is out of range */
    if ((min_online_cpus < 1) || (min_online_cpus > CPUS_AVAILABLE))
    {
        min_online_cpus = 1;
    }
    
    /* online all cores; they are then offlined again based on the new value */
    schedule_work_on(0, &hotplug_online_all_work);
        
    return ret;
}
Example #10
static int mc_suspend_notifier(struct notifier_block *nb,
	unsigned long event, void *dummy)
{
	struct mc_mcp_buffer *mcp = ctx->mcp;
	/* We have nothing to say if MobiCore is not initialized */
	if (!mcp)
		return 0;

#ifdef MC_MEM_TRACES
	mobicore_log_read();
#endif

	switch (event) {
	case PM_SUSPEND_PREPARE:
		/*
		 * Make sure we have finished all the work otherwise
		 * we end up in a race condition
		 */
		cancel_work_sync(&suspend_work);
		/*
		 * We can't go to sleep if MobiCore is not IDLE
		 * or not Ready to sleep
		 */
		dump_sleep_params(&mcp->flags);
		if (!sleep_ready()) {
			ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
			schedule_work_on(0, &suspend_work);
			flush_work(&suspend_work);
			if (!sleep_ready()) {
				dump_sleep_params(&mcp->flags);
				ctx->mcp->flags.sleep_mode.sleep_req = 0;
				MCDRV_DBG_ERROR(mcd, "MobiCore can't SLEEP!");
				return NOTIFY_BAD;
			}
		}
		break;
	case PM_POST_SUSPEND:
		MCDRV_DBG(mcd, "Resume MobiCore system!");
		ctx->mcp->flags.sleep_mode.sleep_req = 0;
		break;
	default:
		break;
	}
	return 0;
}
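Example #11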
inline void hotplugap_boostpulse(void)
{
	unsigned int online_cpus;
	online_cpus = num_online_cpus();
	
	if (!isEnabled)
		return;

	if (unlikely(flags & (EARLYSUSPEND_ACTIVE
		| HOTPLUG_DISABLED)))
		return;

	if (!(flags & BOOSTPULSE_ACTIVE) && (max_online_cpus > online_cpus)) {
		flags |= BOOSTPULSE_ACTIVE;
		/*
		 * If fewer than 2 CPUs are online, bring an additional
		 * CPU online; otherwise cancel any pending offline work
		 * and pause hotplug decisions briefly.
		 * Either way, we don't allow any cpu_down()
		 * whilst the user is interacting with the device.
		 */
		if (likely(online_cpus < 2)) {
			cancel_delayed_work_sync(&aphotplug_offline_work);
			flags |= HOTPLUG_PAUSED;
			schedule_work_on(0, &hotplug_online_single_work);
			schedule_delayed_work(&hotplug_unpause_work, HZ);
		} else {
#if DEBUG
			pr_info("auto_hotplug: %s: %d CPUs online\n", __func__, num_online_cpus());
#endif
			if (delayed_work_pending(&aphotplug_offline_work)) {
#if DEBUG
				pr_info("auto_hotplug: %s: Cancelling aphotplug_offline_work\n", __func__);
#endif
				cancel_delayed_work(&aphotplug_offline_work);
				flags |= HOTPLUG_PAUSED;
				schedule_delayed_work(&hotplug_unpause_work, HZ);
				schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
			}
		}
	}
}
Example #12
static void wg_input_event(struct input_handle *handle, unsigned int type,
				unsigned int code, int value)
{

#if WG_DEBUG
	pr_info("wg: code: %s|%u, val: %i\n",
		((code==ABS_MT_POSITION_X) ? "X" :
		(code==ABS_MT_POSITION_Y) ? "Y" :
		(code==ABS_MT_TRACKING_ID) ? "ID" :
		"undef"), code, value);
#endif
	if (code == ABS_MT_SLOT) {
		sweep2wake_reset();
		doubletap2wake_reset();
		return;
	}

	if (code == ABS_MT_TRACKING_ID && value == -1) {
		sweep2wake_reset();
		touch_cnt = true;
		schedule_work_on(0, &dt2w_input_work);
		schedule_work_on(0, &dt2s_input_work);
		return;
	}

	if (code == ABS_MT_POSITION_X) {
		touch_x = value;
		touch_x_called = true;
	}

	if (code == ABS_MT_POSITION_Y) {
		touch_y = value;
		touch_y_called = true;
	}

	if (touch_x_called && touch_y_called) {
		touch_x_called = false;
		touch_y_called = false;
		schedule_work_on(0, &s2w_input_work);
		schedule_work_on(0, &s2s_input_work);
	} else if (!flg_power_suspended && touch_x_called && !touch_y_called) {
		touch_x_called = false;
		touch_y_called = false;
		schedule_work_on(0, &s2w_input_work);
		schedule_work_on(0, &s2s_input_work);
	}
}
Example #13
static ssize_t store_run_queue_avg(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	if (buf[0] == '1')
	{	
		if (is_dual_locked != 0)
			return count;

		cpufreq_set_limit(DVFS_START);
#if 1
		cpu_hotplug_driver_lock();
		if (cpu_is_offline(NON_BOOT_CPU))
		{	
			cpu_up(NON_BOOT_CPU); // it takes 60ms
		}
		cpu_hotplug_driver_unlock();
#else	
		if (cpu_is_offline(NON_BOOT_CPU))
			schedule_work_on(0, &dvfs_hotplug_work);
#endif
		stall_mpdecision = 1;	
		is_dual_locked = 1;
	}
	else
	{
		if (is_dual_locked == 0)
		{
			stall_mpdecision = 0;
			return count;
		}

		cpufreq_set_limit(DVFS_STOP);

		stall_mpdecision = 0;
		is_dual_locked = 0;
	}

	return count;
}
Example #14
static int jobs_init(void)
{
	unsigned int cpus_num = num_online_cpus();
	unsigned int cpu;

	if (cpus_num < MAX_JOBS) {
		pr_err("Need %u more CPUs!!!\n", MAX_JOBS - cpus_num);
		return -EINVAL;
	}

	/* we are interested only in working cpus */
	for_each_online_cpu(cpu) {
		if (jobs_num >= MAX_JOBS)
			break;

		pr_info("Scheduling job #%u\n", cpu);

		schedule_work_on(cpu, jobs[jobs_num++]);
	}

	return 0;
}
Example #15
static void handle_poweroff(int key, struct tty_struct *tty)
{
	/* run sysrq poweroff on boot cpu */
	schedule_work_on(first_cpu(cpu_online_map), &poweroff_work);
}
Example #16
static enum hrtimer_restart secos_booster_hrtimer_fn(struct hrtimer *timer)
{
	schedule_work_on(0, &stopwq);

	return HRTIMER_NORESTART;
}
Example #17
static void handle_poweroff(int key)
{
	/* run sysrq poweroff on boot cpu */
	schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
}
Example #18
static void hotplug_decision_work_fn(struct work_struct *work)
{
	unsigned int running, disable_load, sampling_rate, avg_running = 0;
	unsigned int online_cpus, available_cpus, i, j;
	bool hotplug_flag_on = false;
	bool hotplug_flag_off = false;
#if DEBUG
	unsigned int k;
#endif
	if (!isEnabled)
		return;
		
	online_cpus = num_online_cpus();
	available_cpus = CPUS_AVAILABLE;
	disable_load = DISABLE_LOAD_THRESHOLD; // * online_cpus;
	//enable_load = ENABLE_LOAD_THRESHOLD; // * online_cpus;
	/*
	 * Multiply nr_running() by 100 so we don't have to
	 * use fp division to get the average.
	 */
	running = nr_running() * 100;

	history[index] = running;

#if DEBUG
	pr_info("online_cpus is: %d\n", online_cpus);
	//pr_info("enable_load is: %d\n", enable_load);
	pr_info("disable_load is: %d\n", disable_load);
	pr_info("index is: %d\n", index);
	pr_info("running is: %d\n", running);
#endif

	/*
	 * Use a circular buffer to calculate the average load
	 * over the sampling periods.
	 * This will absorb load spikes of short duration where
	 * we don't want additional cores to be onlined because
	 * the cpufreq driver should take care of those load spikes.
	 */
	for (i = 0, j = index; i < SAMPLING_PERIODS; i++) {
		avg_running += history[j];
		/* wrap at zero so that every slot is summed exactly once */
		if (unlikely(j == 0))
			j = INDEX_MAX_VALUE;
		else
			j--;
	}

	/*
	 * If we are at the end of the buffer, return to the beginning.
	 */
	if (unlikely(index++ == INDEX_MAX_VALUE))
		index = 0;

#if DEBUG
	pr_info("array contents: ");
	for (k = 0; k < SAMPLING_PERIODS; k++) {
		 pr_info("%d: %d\t",k, history[k]);
	}
	pr_info("\n");
	pr_info("avg_running before division: %d\n", avg_running);
#endif

	avg_running = avg_running / SAMPLING_PERIODS;

#if DEBUG
	pr_info("average_running is: %d\n", avg_running);
#endif

	if (likely(!(flags & HOTPLUG_DISABLED))) {
		int cpu;
		for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++)
		{
			if (avg_running >= enable_load[cpu] && (!cpu_online(cpu)))
			{
				hotplug_cpu_single_on[cpu] = 1;
				hotplug_flag_on = true;
			}
			else if (avg_running < enable_load[cpu] && (cpu_online(cpu)))
			{
				hotplug_cpu_single_off[cpu] = 1;
				hotplug_flag_off = true;
			}
		}

		if (unlikely((avg_running >= ENABLE_ALL_LOAD_THRESHOLD) && (online_cpus < available_cpus))) {
			pr_info("auto_hotplug: Onlining all CPUs, avg running: %d\n", avg_running);
			/*
			 * Flush any delayed offlining work from the workqueue.
			 * No point in having expensive unnecessary hotplug transitions.
			 * We still online after flushing, because load is high enough to
			 * warrant it.
			 * We set the paused flag so the sampling can continue but no more
			 * hotplug events will occur.
			 */
			flags |= HOTPLUG_PAUSED;
			if (delayed_work_pending(&aphotplug_offline_work))
				cancel_delayed_work(&aphotplug_offline_work);
			hotplug_flag_on = false;
			schedule_work_on(0, &hotplug_online_all_work);
			return;
		} else if (flags & HOTPLUG_PAUSED) {
			schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
			return;
		} else if (hotplug_flag_on) {
#if DEBUG
			pr_info("auto_hotplug: Onlining single CPU, avg running: %d\n", avg_running);
#endif
			if (delayed_work_pending(&aphotplug_offline_work))
				cancel_delayed_work(&aphotplug_offline_work);
			schedule_work_on(0, &hotplug_online_single_work);
			return;
		} else if (hotplug_flag_off) {
			/* Only queue a cpu_down() if there isn't one already pending */
			if (!(delayed_work_pending(&aphotplug_offline_work))) {
#if DEBUG
				pr_info("auto_hotplug: Offlining CPU, avg running: %d\n", avg_running);
#endif
				hotplug_flag_off = false;
				schedule_delayed_work_on(0, &aphotplug_offline_work, HZ);
			}
			/* If boostpulse is active, clear the flags */
			if (flags & BOOSTPULSE_ACTIVE) {
				flags &= ~BOOSTPULSE_ACTIVE;
#if DEBUG
				pr_info("auto_hotplug: Clearing boostpulse flags\n");
#endif
			}
		}
	}

	/*
	 * Reduce the sampling rate dynamically based on online cpus.
	 */
	sampling_rate = MIN_SAMPLING_RATE * (online_cpus * online_cpus);
#if DEBUG
	pr_info("sampling_rate is: %d\n", jiffies_to_msecs(sampling_rate));
#endif
	schedule_delayed_work_on(0, &hotplug_decision_work, sampling_rate);
}
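For reference, the circular-buffer averaging used in hotplug_decision_work_fn() can be exercised on its own as plain C. The window size, the sample values and the starting index below are invented purely for illustration:

#include <stdio.h>

#define SAMPLING_PERIODS 5                      /* hypothetical window size */
#define INDEX_MAX_VALUE  (SAMPLING_PERIODS - 1)

int main(void)
{
	/* nr_running() * 100 style samples; the values are made up */
	unsigned int history[SAMPLING_PERIODS] = { 100, 200, 300, 400, 500 };
	unsigned int index = 2;                 /* slot written most recently */
	unsigned int avg_running = 0;
	unsigned int i, j;

	/* Walk backwards from the newest slot, wrapping at zero, so that
	 * every slot is summed exactly once. */
	for (i = 0, j = index; i < SAMPLING_PERIODS; i++) {
		avg_running += history[j];
		j = (j == 0) ? INDEX_MAX_VALUE : j - 1;
	}

	avg_running /= SAMPLING_PERIODS;
	printf("average load: %u\n", avg_running);      /* prints 300 here */
	return 0;
}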