Example #1
static irqreturn_t tmu_irq(int irq, void *id)
{
	struct tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + INTSTAT);

	if (status & INTSTAT_RISE0) {
		pr_info("Throttling interrupt occured!!!!\n");
		__raw_writel(INTCLEAR_RISE0, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_THROTTLED;
		queue_delayed_work_on(0, tmu_monitor_wq,
				&info->polling, usecs_to_jiffies(500 * 1000));
	} else if (status & INTSTAT_RISE1) {
		pr_info("Warning interrupt occured!!!!\n");
		__raw_writel(INTCLEAR_RISE1, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_WARNING;
		queue_delayed_work_on(0, tmu_monitor_wq,
				&info->polling, usecs_to_jiffies(500 * 1000));
	} else if (status & INTSTAT_RISE2) {
		pr_info("Tripping interrupt occured!!!!\n");
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2, info->tmu_base + INTCLEAR);
		tmu_tripped_cb();
	} else {
		pr_err("%s: TMU interrupt error\n", __func__);
		return -ENODEV;
	}

	return IRQ_HANDLED;
}
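All of these examples share one skeleton. For reference, here is a minimal self-contained sketch of it (every name below is illustrative, not taken from any of the drivers in this list): create a dedicated workqueue, bind a handler with INIT_DELAYED_WORK(), and pin execution to CPU 0 with queue_delayed_work_on(), re-arming from inside the handler for periodic work.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_work;

/* Runs on CPU 0 and re-arms itself every 100 ms. */
static void demo_work_fn(struct work_struct *work)
{
	/* ... periodic job goes here ... */
	queue_delayed_work_on(0, demo_wq, &demo_work, msecs_to_jiffies(100));
}

static int __init demo_init(void)
{
	demo_wq = create_workqueue("demo");
	if (!demo_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&demo_work, demo_work_fn);
	queue_delayed_work_on(0, demo_wq, &demo_work, msecs_to_jiffies(100));
	return 0;
}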
Example #2
static int lcd_notifier_callback(struct notifier_block *nb,
                                 unsigned long event, void *data)
{
	switch (event) {
	case LCD_EVENT_ON_START:
		isSuspended = false;
		if (tplug_hp_enabled)
			queue_delayed_work_on(0, tplug_wq, &tplug_work,
					      msecs_to_jiffies(sampling_time));
		else
			queue_delayed_work_on(0, tplug_resume_wq, &tplug_resume_work,
					      msecs_to_jiffies(10));
		pr_info("thunderplug : resume called\n");
		break;
	case LCD_EVENT_ON_END:
		break;
	case LCD_EVENT_OFF_START:
		break;
	case LCD_EVENT_OFF_END:
		isSuspended = true;
		pr_info("thunderplug : suspend called\n");
		break;
	default:
		break;
	}

	return 0;
}
Example #3
static void cpuload_nothing(struct work_struct *work)
{
	high_transition = 0;
	if (hybrid == 1)
		queue_delayed_work_on(0, cpuload_wq, &cpuload_work, 0);
	else
		/* delays are in jiffies: 200 jiffies here vs. 0 (run at once) above */
		queue_delayed_work_on(0, cpuload_wq, &dummy_work, 200);
}
Example #4
static int __init s5pv310_busfreq_init(void)
{
	int ret, i;

#ifdef HAVE_DAC
	s5pv310_dac_init();
#endif
	ret = busfreq_ppmu_init();
	if (ret)
		return ret;

	p_idx = LV_0;
#ifdef CONFIG_REGULATOR
	int_regulator = regulator_get(NULL, "vdd_int");
	if (IS_ERR(int_regulator)) {
		printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
		goto out;
	}
#endif
	busfreq_wq = create_workqueue("busfreq monitor");
	if (!busfreq_wq) {
		printk(KERN_ERR "Creation of busfreq work failed\n");
		return -ENOMEM;	/* allocation failure, not a bad address */
	}
	INIT_DELAYED_WORK(&busfreq_work, busfreq_timer);

	sampling_rate = CHECK_DELAY;
	up_threshold = UP_THRESHOLD_DEFAULT;

	for (i = 0; i < LV_END; i++)
		time_in_state[i] = 0;

	hybrid = 1;

	queue_delayed_work_on(0, busfreq_wq, &busfreq_work, CHECK_DELAY);

	high_transition = 0;
	trans_load = DEFAULT_CPU_LOAD_FOR_TRANSITION;

	cpuload_wq = create_workqueue("cpuload monitor");
	if (!cpuload_wq) {
		printk(KERN_ERR "Creation of cpuload monitor work failed\n");
		return -ENOMEM;
	}
	INIT_DELAYED_WORK_DEFERRABLE(&cpuload_work, cpuload_timer);
	INIT_DELAYED_WORK_DEFERRABLE(&dummy_work, cpuload_nothing);

	queue_delayed_work_on(0, cpuload_wq, &dummy_work, HZ);

	return 0;
out:
	printk(KERN_ERR "%s: failed initialization\n", __func__);
	return -EINVAL;
}
Example #5
static void cpuload_timer(struct work_struct *work)
{
	unsigned int i, avg_load, max_load, load = 0;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
						tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
						tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			idle_time = wall_time;

		/* guard against a zero-length sampling window */
		if (!wall_time)
			continue;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		if (tmp_info->load > load)
			load = tmp_info->load;
	}
	max_load = load;
	/* note: this divides the max load, not a sum of per-CPU loads */
	avg_load = load / num_online_cpus();

	if (high_transition == 0) {
		if (max_load > trans_load) {
			cancel_delayed_work_sync(&busfreq_work);
			high_transition = 1;
			sampling_rate = HZ / 25;	/* 40 ms */
		}
	} else {
		if (max_load <= trans_load) {
			cancel_delayed_work_sync(&busfreq_work);
			high_transition = 0;
			sampling_rate = HZ / 10;	/* 100 ms */
		}
	}

	queue_delayed_work_on(0, busfreq_wq, &busfreq_work, 0);

	if (hybrid == 1)
		queue_delayed_work_on(0, cpuload_wq, &cpuload_work, HZ/25);
	else
		queue_delayed_work_on(0, cpuload_wq, &dummy_work, HZ);

}
Example #6
static void tplug_es_resume_work(struct early_suspend *p)
{
	isSuspended = false;
#ifdef CONFIG_SCHED_HMP
	if (tplug_hp_style == 1)
#else
	if (tplug_hp_enabled)
#endif
		queue_delayed_work_on(0, tplug_wq, &tplug_work,
				      msecs_to_jiffies(sampling_time));
	else
		queue_delayed_work_on(0, tplug_resume_wq, &tplug_resume_work,
				      msecs_to_jiffies(10));
	pr_info("thunderplug : resume called\n");
}
Example #7
static void tplug_input_event(struct input_handle *handle, unsigned int type,
		unsigned int code, int value)
{
	if (type == EV_KEY && code == BTN_TOUCH) {
		if (DEBUG)
			pr_info("%s : type = %d, code = %d, value = %d\n",
				THUNDERPLUG, type, code, value);
		if (value == 0) {
			stop_boost = 1;
			if (DEBUG)
				pr_info("%s: stopping boost\n", THUNDERPLUG);
		} else {
			stop_boost = 0;
			if (DEBUG)
				pr_info("%s: starting boost\n", THUNDERPLUG);
		}
	}
#ifdef CONFIG_SCHED_HMP
	if ((type == EV_KEY) && (code == BTN_TOUCH) && (value == 1)
		&& touch_boost_enabled == 1)
#else
	if ((type == EV_KEY) && (code == BTN_TOUCH) && (value == 1)
		&& touch_boost_enabled == 1)
#endif
	{
		if (DEBUG)
			pr_info("%s : touch boost\n", THUNDERPLUG);
		queue_delayed_work_on(0, tplug_boost_wq, &tplug_boost,
				      msecs_to_jiffies(0));
	}
}
Example #8
static void watchdog_start(void)
{
	unsigned int val;
	unsigned long flags;

	spin_lock_irqsave(&wdt_lock, flags);

	/* set to PCLK / 256 / 128 */
	val = S3C2410_WTCON_DIV128;
	val |= S3C2410_WTCON_PRESCALE(255);
	writel(val, S3C2410_WTCON);

	/* program initial count */
	writel(watchdog_reset * TPS, S3C2410_WTCNT);
	writel(watchdog_reset * TPS, S3C2410_WTDAT);

	/* start timer */
	val |= S3C2410_WTCON_RSTEN | S3C2410_WTCON_ENABLE;
	writel(val, S3C2410_WTCON);
	spin_unlock_irqrestore(&wdt_lock, flags);

	/* make sure we're ready to pet the dog */
#if defined(PET_BY_WORKQUEUE)
	queue_delayed_work_on(0, watchdog_wq, &watchdog_work,
			      watchdog_pet * HZ);
#elif defined(PET_BY_DIRECT_TIMER)
	pet_watchdog_timer.expires = jiffies + watchdog_pet * HZ;
	add_timer_on(&pet_watchdog_timer, 0);
#else
	hrtimer_start(&watchdog_timer,
		      ktime_set(watchdog_pet, 0), HRTIMER_MODE_REL);
#endif
}
Example #9
static void watchdog_workfunc(struct work_struct *work)
{
/*	pr_err("%s kicking...\n", __func__); */
	writel(watchdog_reset * TPS, S3C2410_WTCNT);
	queue_delayed_work_on(0, watchdog_wq, &watchdog_work,
			      watchdog_pet * HZ);
}
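None of the watchdog excerpts show the corresponding stop path. For the PET_BY_WORKQUEUE variant, a plausible counterpart might look like the sketch below (watchdog_stop itself is hypothetical; the other names come from Example #8). cancel_delayed_work_sync() waits out a running instance and is safe even though the work re-queues itself.

static void watchdog_stop(void)
{
	unsigned long flags;

	/* stop the self-rearming pet work first */
	cancel_delayed_work_sync(&watchdog_work);

	spin_lock_irqsave(&wdt_lock, flags);
	writel(0, S3C2410_WTCON);	/* clear RSTEN | ENABLE */
	spin_unlock_irqrestore(&wdt_lock, flags);
}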
Example #10
int __init intelli_plug_init(void)
{
	int rc;

	//pr_info("intelli_plug: scheduler delay is: %d\n", delay);
	pr_info("intelli_plug: version %d.%d by faux123\n",
		 INTELLI_PLUG_MAJOR_VERSION,
		 INTELLI_PLUG_MINOR_VERSION);

	rc = input_register_handler(&intelli_plug_input_handler);
	if (rc)
		pr_err("intelli_plug: input handler registration failed\n");
#ifdef CONFIG_POWERSUSPEND
	register_power_suspend(&intelli_plug_power_suspend_driver);
#endif

	intelliplug_wq = alloc_workqueue("intelliplug",
				WQ_HIGHPRI | WQ_UNBOUND, 1);
	intelliplug_boost_wq = alloc_workqueue("iplug_boost",
				WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!intelliplug_wq || !intelliplug_boost_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&intelli_plug_work, intelli_plug_work_fn);
	INIT_DELAYED_WORK(&intelli_plug_boost, intelli_plug_boost_fn);
	queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
		msecs_to_jiffies(10));

	return 0;
}
Example #11
static void do_input_boost(struct work_struct *work)
{
	unsigned int i, ret, freq;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	for_each_online_cpu(i) {

		i_sync_info = &per_cpu(sync_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;

		/* ensure the touch boost freq never exceeds the max scaling freq */
		if (input_boost_freq > policy.max)
			freq = policy.max;
		else
			freq = input_boost_freq;

		if (policy.cur >= freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
			&i_sync_info->input_boost_rem,
			msecs_to_jiffies(input_boost_ms));
	}
}
Example #12
static void __cpuinit intelli_plug_resume(struct early_suspend *handler)
#endif	/* closes an #ifdef outside this excerpt */
{
	int num_of_active_cores;
	int i;

	mutex_lock(&intelli_plug_mutex);
	/* keep cores awake long enough for faster wake up */
	persist_count = BUSY_PERSISTENCE;
	suspended = false;
	mutex_unlock(&intelli_plug_mutex);

	/* wake up everyone */
	num_of_active_cores = num_possible_cpus();

	for (i = 1; i < num_of_active_cores; i++) {
		cpu_up(i);
	}

	screen_off_limit(false);
	wakeup_boost();

	queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
		msecs_to_jiffies(10));
}
Example #13
static ssize_t __ref thunderplug_hp_style_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
{
	int val, last_val;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	last_val = tplug_hp_style;
	switch (val) {
	case HOTPLUG_PERCORE:
	case HOTPLUG_SCHED:
		tplug_hp_style = val;
		break;
	default:
		pr_info("%s : invalid choice\n", THUNDERPLUG);
		break;
	}

	if (tplug_hp_style == HOTPLUG_PERCORE && tplug_hp_style != last_val) {
		pr_info("%s: Switching to Per-core hotplug model\n", THUNDERPLUG);
		sched_set_boost(DISABLED);
		queue_delayed_work_on(0, tplug_wq, &tplug_work,
				      msecs_to_jiffies(sampling_time));
	} else if (tplug_hp_style == 2) {
		pr_info("%s: Switching to sched based hotplug model\n", THUNDERPLUG);
		set_sched_profile(tplug_sched_mode);
	}

	return count;
}
Example #14
static void do_input_boost(struct work_struct *work)
{
	unsigned int i, ret;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	get_online_cpus();
	for_each_online_cpu(i) {

		i_sync_info = &per_cpu(sync_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;
		if (policy.cur >= input_boost_freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = input_boost_freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
			&i_sync_info->input_boost_rem,
			msecs_to_jiffies(input_boost_ms));
	}
	put_online_cpus();
}
Example #15
/*---------------------------------------------------------------------------*/
int xio_schedwork_add(struct xio_schedwork *sched_work,
		      int msec_duration, void *data,
		      void (*timer_fn)(void *data),
		      xio_schedwork_handle_t *handle_out)
{
	struct xio_delayed_work *xdwork;
	struct xio_context *ctx = sched_work->ctx;
	unsigned long delay_jiffies;

	xdwork = kmalloc(sizeof(*xdwork), GFP_KERNEL);
	if (!xdwork) {
		ERROR_LOG("kmalloc failed.\n");
		return -1;
	}

	xdwork->data = data;
	xdwork->timer_fn = timer_fn;
	xdwork->ctx = ctx;

	INIT_DELAYED_WORK(&xdwork->dwork, xio_work_callback);

	delay_jiffies = msecs_to_jiffies(msec_duration);

	/* queue the work */
	if (!queue_delayed_work_on(ctx->cpuid, sched_work->workqueue,
				   &xdwork->dwork, delay_jiffies)) {
		ERROR_LOG("work already queued?\n");
		kfree(xdwork);	/* don't leak the allocation on failure */
		return -1;
	}

	/* for cancellation */
	*handle_out = xdwork;

	return 0;
}
Example #16
static void do_input_boost(struct work_struct *work)
{
	unsigned int i, ret;
	struct cpufreq_policy policy;

	/* 
	 * to avoid concurrency issues we cancel rem_input_boost
	 * and wait for it to finish the work
	 */
	cancel_delayed_work_sync(&rem_input_boost);

	for_each_online_cpu(i) {
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;

		if (policy.cur < input_boost_freq) {
			boost_freq_buf = input_boost_freq;
			cpufreq_update_policy(i);
		}
	}

	queue_delayed_work_on(0, input_boost_wq,
			      &rem_input_boost, msecs_to_jiffies(30));
}
Example #17
/*
 * Every sampling period, check the average load of the online CPUs.
 * If the average load is above up_threshold and up_timer has expired,
 * bring one more CPU online; otherwise, once down_timer has expired,
 * take one CPU offline.
 */
static __ref void load_timer(struct work_struct *work)
{
	unsigned int cpu;
	unsigned int avg_load = 0;
	
	if (down_timer < down_timer_cnt)
		down_timer++;

	if (up_timer < up_timer_cnt)
		up_timer++;
	
	for_each_online_cpu(cpu)
		avg_load += cpufreq_quick_get_util(cpu);
		
	avg_load /= num_online_cpus();
	
#if DEBUG
	pr_debug("%s: avg_load: %u, num_online_cpus: %u\n", __func__, avg_load, num_online_cpus());
	pr_debug("%s: up_timer: %u, down_timer: %u\n", __func__, up_timer, down_timer);
#endif

	if (avg_load >= up_threshold && up_timer >= up_timer_cnt)
		up_one();
	else if (down_timer >= down_timer_cnt)
		down_one();

	queue_delayed_work_on(0, dyn_workq, &dyn_work, delay);
}
Example #18
static void __cpuinit intelli_plug_resume(struct early_suspend *handler)
#endif	/* closes an #ifdef outside this excerpt */
{

	if (intelli_plug_active) {
		int cpu;

		mutex_lock(&intelli_plug_mutex);
		/* keep cores awake long enough for faster wake up */
		persist_count = BUSY_PERSISTENCE;
		suspended = false;
		mutex_unlock(&intelli_plug_mutex);

		for_each_possible_cpu(cpu) {
			if (cpu == 0)
				continue;
			cpu_up(cpu);
		}

		wakeup_boost();
		screen_off_limit(false);
	}
	queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
		msecs_to_jiffies(10));
}
Example #19
static void __cpuinit sleepy_plug_work_fn(struct work_struct *work)
{
	enum mp_decisions decision = DO_NOTHING;

	if (sleepy_plug_active == 1) {
		/*
		 * detect artificial loads or constant loads
		 * using msm rqstats
		 */
		decision = mp_decision();
#ifdef DEBUG_SLEEPY_PLUG
		pr_info("decision: %d\n", decision);
#endif
		if (!suspended) {
			if (decision == CPU_UP) {
				cpu_up(1);
				sampling_time = BUSY_SAMPLING_MS;
			} else if (decision == CPU_DOWN) {
				cpu_down(1);
				sampling_time = DEF_SAMPLING_MS;
			} else if (decision == DO_NOTHING)
				sampling_time = DEF_SAMPLING_MS;
		}
#ifdef DEBUG_SLEEPY_PLUG
		else
			pr_info("sleepy_plug is suspended!\n");
#endif
	}
	queue_delayed_work_on(0, sleepy_plug_wq, &sleepy_plug_work,
		msecs_to_jiffies(sampling_time));
}
Example #20
static irqreturn_t tmu_irq(int irq, void *id)
{
	struct tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + INTSTAT);
	/*
	 * If multiple interrupts are pending, the one signalling the
	 * higher temperature is serviced first.
	 */
	if (status & INTSTAT_RISE1) {
		dev_info(info->dev, "Tripping interrupt\n");
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE1, info->tmu_base + INTCLEAR);
	} else if (status & INTSTAT_RISE0) {
		dev_info(info->dev, "Throttling interrupt\n");
		__raw_writel(INTCLEAR_RISE0, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_THROTTLED;
	} else {
		dev_err(info->dev, "%s: TMU interrupt error. INTSTAT : %x\n", __func__, status);
		__raw_writel(status, info->tmu_base + INTCLEAR);
		return IRQ_HANDLED;
	}

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, usecs_to_jiffies(0));

	return IRQ_HANDLED;
}
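Example #21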
static void tmu_monitor(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tmu_info *info =
		container_of(delayed_work, struct tmu_info, polling);
	struct tmu_data *data = info->dev->platform_data;
	int cur_temp;

	cur_temp = get_cur_temp(info);

	dev_dbg(info->dev, "Current: %dc, FLAG=%d\n", cur_temp, info->tmu_state);

	mutex_lock(&tmu_lock);
	switch (info->tmu_state) {
	case TMU_STATUS_NORMAL:
		exynos_thermal_unthrottle();
		enable_irq(info->irq);
		goto out;
	case TMU_STATUS_THROTTLED:
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
		else if (cur_temp > data->ts.stop_throttle)
			exynos_thermal_throttle();
		else
			info->tmu_state = TMU_STATUS_NORMAL;
		break;
	case TMU_STATUS_TRIPPED:
		if (cur_temp >= data->ts.start_emergency)
			panic("Emergency thermal shutdown: temp=%d\n",
			      cur_temp);
		if (cur_temp >= data->ts.start_tripping)
			pr_err("thermal tripped: temp=%d\n", cur_temp);
		else
			info->tmu_state = TMU_STATUS_THROTTLED;
		break;
	default:
		break;
	}

	/* Memory throttling */
	if (cur_temp >= data->ts.start_mem_throttle &&
		!info->mem_throttled) {
		set_refresh_period(FREQ_IN_PLL, info->auto_refresh_mem_throttle);
		info->mem_throttled = true;
		dev_dbg(info->dev, "set auto refresh period %dns\n",
				info->auto_refresh_mem_throttle);
	} else if (cur_temp <= data->ts.stop_mem_throttle &&
		info->mem_throttled) {
		set_refresh_period(FREQ_IN_PLL, info->auto_refresh_normal);
		info->mem_throttled = false;
		dev_dbg(info->dev, "set auto refresh period %dns\n",
				info->auto_refresh_normal);
	}

	queue_delayed_work_on(0, tmu_monitor_wq,
			      &info->polling, info->sampling_rate);
out:
	mutex_unlock(&tmu_lock);
}
Example #22
static void apply_down_lock(unsigned int cpu)
{
	struct down_lock *dl = &per_cpu(lock_info, cpu);

	dl->locked = 1;
	queue_delayed_work_on(0, intelliplug_wq, &dl->lock_rem,
			      msecs_to_jiffies(down_lock_dur));
}
Example #23
static inline int
QueueDelayedWorkOn(int cpu,
		   CommOSWorkQueue *wq,
		   CommOSWork *work,
		   unsigned long jif)
{
	return !queue_delayed_work_on(cpu, wq, work, jif) ? -1 : 0;
}
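queue_delayed_work_on() returns false when the work item was already pending, so this wrapper maps "already queued" to -1 and success to 0. A hypothetical call site (wq and work assumed to be initialized elsewhere):

	/* -1 means the item was already pending, not a hard failure;
	 * the earlier expiry time is kept. */
	if (QueueDelayedWorkOn(0, wq, &work, msecs_to_jiffies(50)) < 0)
		pr_debug("work already pending\n");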
Example #24
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (long) data;	/* cast via long: safe pointer round-trip on 64-bit */
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (dest_policy.cur >= src_policy.cur) {
			pr_debug("No sync. CPU%d@%dKHz >= CPU%d@%dKHz\n",
				 dest_cpu, dest_policy.cur,
				 src_cpu, src_policy.cur);
			continue;
		}

		if (sync_threshold && (dest_policy.cur >= sync_threshold))
			continue;

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold)
			s->boost_min = min(sync_threshold, src_policy.cur);
		else
			s->boost_min = src_policy.cur;

		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
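Example #25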
static void __ref intelli_plug_work_fn(struct work_struct *work)
{
	unsigned int nr_run_stat;
	unsigned int cpu_count = 0;
	unsigned int nr_cpus = 0;

	int i;

	if (intelli_plug_active) {
		nr_run_stat = calculate_thread_stats();
		update_per_cpu_stat();
#ifdef DEBUG_INTELLI_PLUG
		pr_info("nr_run_stat: %u\n", nr_run_stat);
#endif
		cpu_count = nr_run_stat;
		nr_cpus = num_online_cpus();

		if (!suspended) {

			if (persist_count > 0)
				persist_count--;

			switch (cpu_count) {
			case 1:
				if (persist_count == 0) {
					/* take down everyone */
					unplug_cpu(0);
				}
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 1: %u\n", persist_count);
#endif
				break;
			case 2:
				if (persist_count == 0)
					persist_count = DUAL_PERSISTENCE;
				if (nr_cpus < 2) {
					for (i = 1; i < cpu_count; i++)
						cpu_up(i);
				} else {
					unplug_cpu(1);
				}
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 2: %u\n", persist_count);
#endif
				break;
			default:
				pr_err("Run Stat Error: Bad value %u\n", nr_run_stat);
				break;
			}
		}
#ifdef DEBUG_INTELLI_PLUG
		else
			pr_info("intelli_plug is suspened!\n");
#endif
	}
	queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
		msecs_to_jiffies(sampling_time));
}
Example #26
static void intelli_plug_input_event(struct input_handle *handle,
		unsigned int type, unsigned int code, int value)
{
#ifdef DEBUG_INTELLI_PLUG
	pr_info("intelli_plug touched!\n");
#endif
	queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_boost,
		msecs_to_jiffies(10));
}
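Example #27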
static void run_boost_migration(unsigned int cpu)
{
	int dest_cpu = cpu;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->pending = false;
	src_cpu = s->src_cpu;
	spin_unlock_irqrestore(&s->lock, flags);

	ret = cpufreq_get_policy(&src_policy, src_cpu);
	if (ret)
		return;

	ret = cpufreq_get_policy(&dest_policy, dest_cpu);
	if (ret)
		return;

	if (src_policy.min == src_policy.cpuinfo.min_freq) {
		pr_debug("No sync. Source CPU%d@%dKHz at min freq\n",
				src_cpu, src_policy.cur);

		return;
	}

	cancel_delayed_work_sync(&s->boost_rem);
	if (sync_threshold)
		s->boost_min = min(sync_threshold, src_policy.cur);
	else
		s->boost_min = src_policy.cur;

	/* Force policy re-evaluation to trigger adjust notifier. */
	get_online_cpus();
	if (cpu_online(src_cpu))
		/*
		 * Send an unchanged policy update to the source
		 * CPU. Even though the policy isn't changed from
		 * its existing boosted or non-boosted state
		 * notifying the source CPU will let the governor
		 * know a boost happened on another CPU and that it
		 * should re-evaluate the frequency at the next timer
		 * event without interference from a min sample time.
		 */
		cpufreq_update_policy(src_cpu);
	if (cpu_online(dest_cpu)) {
		cpufreq_update_policy(dest_cpu);
		queue_delayed_work_on(dest_cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
	} else {
		s->boost_min = 0;
	}
	put_online_cpus();
}
Example #28
void state_suspend(void)
{
	dprintk("%s: suspend called.\n", STATE_NOTIFIER);
	if (state_suspended || suspend_in_progress || !enabled)
		return;

	suspend_in_progress = true;

	queue_delayed_work_on(0, susp_wq, &suspend_work, 
		msecs_to_jiffies(suspend_defer_time * 1000));
}
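Example #29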
static void cur_temp_monitor(struct work_struct *work)
{
	int cur_temp;
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tmu_info *info =
		 container_of(delayed_work, struct tmu_info, monitor);

	cur_temp = get_cur_temp(info);
	pr_info("current temp = %d\n", cur_temp);
	queue_delayed_work_on(0, tmu_monitor_wq,
			&info->monitor, info->sampling_rate);
}
Example #30
static void cur_temp_monitor(struct work_struct *work)
{
	unsigned char cur_temp;
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tmu_info *info =
		 container_of(delayed_work, struct tmu_info, monitor);

	cur_temp = get_cur_temp(info);
	pr_info("current temp = %d\n", cur_temp);
	queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
			usecs_to_jiffies(1000 * 1000));
}