/**
 * usb_device_phy_en() - enable or disable the 5V supply for the USB gadget
 * @enable: USB_ENABLE to power up the PHY for device mode, USB_DISABLE otherwise
 *
 * This function sets up the regulators, clock and PHY control register for
 * USB gadget mode.
 */
static void usb_device_phy_en(int enable)
{
	int volt = 0;
	int ret = -1;

	if (phy_enable_stat == enable)
		return;

	if (enable == USB_ENABLE) {
		wake_lock(&ab8500_musb_wakelock);
		ux500_pins_enable(usb_gpio_pins);
		clk_enable(sysclock);
		phy_enable_stat = USB_ENABLE;
		regulator_enable(musb_vape_supply);
		regulator_enable(musb_smps2_supply);

		/* Set Vintcore12 LDO to 1.3V */
		ret = regulator_set_voltage(musb_vintcore_supply,
						1300000, 1350000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set the Vintcore"
					" to 1.3V, ret=%d\n", ret);
		ret = regulator_set_optimum_mode(musb_vintcore_supply,
						 28000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set optimum mode"
					" (ret=%d)\n", ret);
		regulator_enable(musb_vintcore_supply);
		volt = regulator_get_voltage(musb_vintcore_supply);
		if ((volt != 1300000) && (volt != 1350000))
			printk(KERN_ERR "Vintcore is not"
					" set to 1.3V"
					" volt=%d\n", volt);
#ifdef CONFIG_PM
		stm_musb_context(USB_ENABLE);
#endif

		/* Workaround for USB performance issue. */
		schedule_delayed_work_on(0,
				 &work_usb_workaround,
				 msecs_to_jiffies(USB_PROBE_DELAY));

		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
				DEVICE_NAME, 100);

		abx500_set_register_interruptible(device,
				AB8500_USB,
				AB8500_USB_PHY_CTRL_REG,
				AB8500_USB_DEVICE_ENABLE);
	} else { /* enable == USB_DISABLE */
		/*
		 * Workaround: Sometimes the DISCONNECT interrupt is
		 * not generated in musb_core. Force a disconnect if
		 * necessary before we power down the PHY.
		 */
		stm_musb_force_disconnect();

		if (boot_time_flag)
			boot_time_flag = USB_DISABLE;

		/*
		 * Workaround for bug31952 in ABB cut2.0. Write 0x1
		 * before disabling the PHY.
		 */
		abx500_set_register_interruptible(device, AB8500_USB,
			     AB8500_USB_PHY_CTRL_REG,
			     AB8500_USB_DEVICE_ENABLE);

		udelay(200);

		abx500_set_register_interruptible(device,
			AB8500_USB,
			AB8500_USB_PHY_CTRL_REG,
			AB8500_USB_DEVICE_DISABLE);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
				DEVICE_NAME, 50);

		/* Workaround for USB performance issue. */
		cancel_delayed_work_sync(&work_usb_workaround);
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
					     "usb", 25);

		prcmu_release_usb_wakeup_state();
		phy_enable_stat = USB_DISABLE;
		regulator_disable(musb_vape_supply);
		regulator_disable(musb_smps2_supply);
		regulator_disable(musb_vintcore_supply);
		regulator_set_optimum_mode(musb_vintcore_supply, 0);
		/* Relax the Vintcore12 LDO constraint to the 0V-1.35V range */
		ret = regulator_set_voltage(musb_vintcore_supply,
						0, 1350000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set the Vintcore"
					" voltage range to 0V-1.35V,"
					" ret=%d\n", ret);
		clk_disable(sysclock);
#ifdef CONFIG_PM
		stm_musb_context(USB_DISABLE);
#endif
		ux500_pins_disable(usb_gpio_pins);
		wake_unlock(&ab8500_musb_wakelock);
	}
}
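The function above leans on several globals (the wakelock, the delayed work, the regulators) that are set up elsewhere in the driver. Below is a minimal wiring sketch, assuming the Android wakelock API of this kernel era; the handler name usb_link_status_workaround() and the QoS value it requests are hypothetical placeholders, not the original driver's code.

static struct wake_lock ab8500_musb_wakelock;
static struct delayed_work work_usb_workaround;

/* hypothetical handler for the USB performance workaround work */
static void usb_link_status_workaround(struct work_struct *work)
{
	/* e.g. raise the ARM OPP while USB traffic ramps up (value assumed) */
	prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP, "usb", 125);
}

static int __init ab8500_usb_setup_work(void)
{
	wake_lock_init(&ab8500_musb_wakelock, WAKE_LOCK_SUSPEND, "ab8500-usb");
	INIT_DELAYED_WORK(&work_usb_workaround, usb_link_status_workaround);
	return 0;
}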
Example #2
static void delayed_usecase_work(struct work_struct *work)
{
	unsigned long avg, load, trend, balance;
	bool inc_perf = false;
	bool dec_perf = false;
	u32 irqs_per_s;

	/* determine loadavg  */
	avg = determine_loadavg();
	hp_printk("loadavg = %lu lower th %lu upper th %lu\n",
					avg, lower_threshold, upper_threshold);

	/* determine instant load */
	load = determine_cpu_load();
	hp_printk("cpu instant load = %lu max %lu\n", load, max_instant);

	/* determine load trend */
	trend = determine_cpu_load_trend();
	hp_printk("cpu load trend = %lu min %lu unbal %lu\n",
					trend, min_trend, trend_unbalance);

	/* determine load balancing */
	balance = determine_cpu_balance_trend();
	hp_printk("load balancing trend = %lu min %lu\n",
					balance, max_unbalance);

	irqs_per_s = get_num_interrupts_per_s();

	/* Don't let the configuration change in the middle of our calculations. */
	mutex_lock(&usecase_mutex);

	/* detect "instant" load increase */
	if (load > max_instant || irqs_per_s > exit_irq_per_s) {
		inc_perf = true;
	} else if (!usecase_conf[UX500_UC_USER].enable &&
			usecase_conf[UX500_UC_AUTO].enable) {
		/* detect high loadavg use case */
		if (avg > upper_threshold)
			inc_perf = true;
		/* detect idle use case */
		else if (trend < min_trend)
			dec_perf = true;
		/* detect unbalanced low cpu load use case */
		else if ((balance > max_unbalance) && (trend < trend_unbalance))
			dec_perf = true;
		/* detect low loadavg use case */
		else if (avg < lower_threshold)
			dec_perf = true;
		/* All user use cases disabled, current load not triggering
		 * any change.
		 */
		else if (user_config_updated)
			dec_perf = true;
	} else {
		dec_perf = true;
	}

	/*
	 * set_cpu_config() will not update the config unless it has been
	 * changed.
	 */
	if (dec_perf) {
		if (usecase_conf[UX500_UC_USER].enable)
			set_cpu_config(UX500_UC_USER);
		else if (usecase_conf[UX500_UC_AUTO].enable)
			set_cpu_config(UX500_UC_AUTO);
	} else if (inc_perf &&
		!(usecase_conf[UX500_UC_USER].enable &&
		usecase_conf[UX500_UC_USER].force_usecase)) {
		set_cpu_config(UX500_UC_NORMAL);
	}

	mutex_unlock(&usecase_mutex);

	/* re-arm the scheduled work */
	schedule_delayed_work_on(0, &work_usecase,
				msecs_to_jiffies(CPULOAD_MEAS_DELAY));

}
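delayed_usecase_work() serializes against configuration updates with usecase_mutex. Below is a sketch of the matching setter side, assuming user_config_updated and usecase_conf[] are the globals referenced above; the function name is made up and the sysfs plumbing is omitted.

static void usecase_set_user_enable(bool enable)
{
	mutex_lock(&usecase_mutex);
	usecase_conf[UX500_UC_USER].enable = enable;
	/* make the next work run re-evaluate the configuration */
	user_config_updated = true;
	mutex_unlock(&usecase_mutex);
}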
static void __cpuinit intelli_plug_work_fn(struct work_struct *work)
{
	unsigned int nr_run_stat;
	unsigned int cpu_count = 0;
	unsigned int nr_cpus = 0;

	int decision = 0;
	int i;

	if (intelli_plug_active == 1) {
		nr_run_stat = calculate_thread_stats();
#ifdef DEBUG_INTELLI_PLUG
		pr_info("nr_run_stat: %u\n", nr_run_stat);
#endif
		cpu_count = nr_run_stat;
		// detect artificial or constant loads using msm rq stats
		nr_cpus = num_online_cpus();
		if (!eco_mode_active && (nr_cpus >= 1 && nr_cpus < 4)) {
			decision = mp_decision();
			if (decision) {
				switch (nr_cpus) {
				case 2:
					cpu_count = 3;
#ifdef DEBUG_INTELLI_PLUG
					pr_info("nr_run(2) => %u\n", nr_run_stat);
#endif
					break;
				case 3:
					cpu_count = 4;
#ifdef DEBUG_INTELLI_PLUG
					pr_info("nr_run(3) => %u\n", nr_run_stat);
#endif
					break;
				}
			}
		}
		/* it's busy... let's help it a bit */
		if (cpu_count > 2) {
			if (busy_persist_count == 0) {
				sampling_time = BUSY_SAMPLING_MS;
				busy_persist_count = BUSY_PERSISTENCE;
			}
		} else {
			if (busy_persist_count > 0)
				busy_persist_count--;
			else
				sampling_time = DEF_SAMPLING_MS;
		}

		if (!suspended) {
			switch (cpu_count) {
			case 1:
				if (persist_count > 0)
					persist_count--;
				if (persist_count == 0) {
					//take down everyone
					for (i = 3; i > 0; i--)
						cpu_down(i);
				}
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 1: %u\n", persist_count);
#endif
				break;
			case 2:
				persist_count = DUAL_CORE_PERSISTENCE;
				if (!decision)
					persist_count = DUAL_CORE_PERSISTENCE / CPU_DOWN_FACTOR;
				if (nr_cpus < 2) {
					for (i = 1; i < cpu_count; i++)
						cpu_up(i);
				} else {
					for (i = 3; i > 1; i--)
						cpu_down(i);
				}
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 2: %u\n", persist_count);
#endif
				break;
			case 3:
				persist_count = TRI_CORE_PERSISTENCE;
				if (!decision)
					persist_count = TRI_CORE_PERSISTENCE / CPU_DOWN_FACTOR;
				if (nr_cpus < 3) {
					for (i = 1; i < cpu_count; i++)
						cpu_up(i);
				} else {
					for (i = 3; i > 2; i--)
						cpu_down(i);
				}
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 3: %u\n", persist_count);
#endif
				break;
			case 4:
				persist_count = QUAD_CORE_PERSISTENCE;
				if (!decision)
					persist_count = QUAD_CORE_PERSISTENCE / CPU_DOWN_FACTOR;
				if (nr_cpus < 4)
					for (i = 1; i < cpu_count; i++)
						cpu_up(i);
#ifdef DEBUG_INTELLI_PLUG
				pr_info("case 4: %u\n", persist_count);
#endif
				break;
			default:
				pr_err("Run Stat Error: Bad value %u\n", nr_run_stat);
				break;
			}
		}
#ifdef DEBUG_INTELLI_PLUG
		else
			pr_info("intelli_plug is suspened!\n");
#endif
	}
	schedule_delayed_work_on(0, &intelli_plug_work,
		msecs_to_jiffies(sampling_time));
}
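The worker only hotplugs while !suspended, so a suspend hook must set that flag somewhere. A hedged sketch of the early-suspend side that would pair with it, assuming this kernel's early_suspend framework; the handler name and the fall-back-to-one-core policy are assumptions.

static void intelli_plug_early_suspend(struct early_suspend *handler)
{
	int i;

	/* stop sampling while the screen is off */
	cancel_delayed_work_sync(&intelli_plug_work);
	suspended = 1;
	/* assumption: leave only CPU0 online during suspend */
	for (i = 3; i > 0; i--)
		cpu_down(i);
}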
Example #4
static inline void mtu3d_u3_ltssm_intr_handler(struct musb *musb, u32 dwLtssmValue)
{
	static u32 soft_conn_num = 0;

	if (dwLtssmValue & SS_DISABLE_INTR) {
		os_printk(K_INFO, "LTSSM: SS_DISABLE_INTR [%d] & Set SOFT_CONN=1\n", soft_conn_num++);
		//enable U2 link. after host reset, HS/FS EP0 configuration is applied in musb_g_reset
		os_clrmsk(U3D_SSUSB_U2_CTRL_0P, SSUSB_U2_PORT_PDN);
		os_setmsk(U3D_POWER_MANAGEMENT, SOFT_CONN);
		sts_ltssm = SS_DISABLE_INTR;
	}

	if (dwLtssmValue & ENTER_U0_INTR) {
		soft_conn_num = 0;
		//do not apply the U3 EP0 setting again if the speed is already SuperSpeed;
		//the LTSSM may go to Recovery and back to U0
		if (musb->g.speed != USB_SPEED_SUPER) {
			os_printk(K_INFO, "LTSSM: ENTER_U0_INTR %d\n", musb->g.speed);
			musb_conifg_ep0(musb);
		}
		cancel_delayed_work(&musb->check_ltssm_work);
		sts_ltssm = ENTER_U0_INTR;
	}

	if (dwLtssmValue & VBUS_FALL_INTR) {
		os_printk(K_INFO, "LTSSM: VBUS_FALL_INTR\n");
		mu3d_hal_pdn_ip_port(1, 1, 1, 1);
		mu3d_hal_u3dev_dis();
	}

	if (dwLtssmValue & VBUS_RISE_INTR) {
		os_printk(K_INFO, "LTSSM: VBUS_RISE_INTR\n");
		mu3d_hal_u3dev_en();
	}

	if (dwLtssmValue & ENTER_U3_INTR) {
		os_printk(K_INFO, "LTSSM: ENTER_U3_INTR\n");
		mu3d_hal_pdn_ip_port(0, 0, 1, 0);
		sts_ltssm = ENTER_U3_INTR;
	}

#ifndef POWER_SAVING_MODE
	if (dwLtssmValue & U3_RESUME_INTR) {
		os_printk(K_INFO, "LTSSM: RESUME_INTR\n");
		mu3d_hal_pdn_ip_port(1, 0, 1, 0);
		os_writel(U3D_LINK_POWER_CONTROL, os_readl(U3D_LINK_POWER_CONTROL) | UX_EXIT);
	}
#endif

	if (dwLtssmValue & EXIT_U3_INTR) {
		os_printk(K_INFO, "LTSSM: EXIT_U3_INTR\n");
		mu3d_hal_pdn_ip_port(1, 0, 1, 0);
		sts_ltssm = EXIT_U3_INTR;
	}

	/* 7.5.12.2 Hot Reset Requirements
	 * 1. A downstream port shall reset its Link Error Count as defined in Section 7.4.2.
	 * 2. A downstream port shall reset its PM timers and the associated U1 and U2 timeout values to zero.
	 * 3. The port Configuration information shall remain unchanged (refer to Section 8.4.6 for details).
	 * 4. The port shall maintain its transmitter specifications defined in Table 6-10.
	 * 5. The port shall maintain its low-impedance receiver termination (RRX-DC) defined in Table 6-13.
	 */
	if (dwLtssmValue & HOT_RST_INTR) {
		DEV_INT32 link_err_cnt;
		DEV_INT32 timeout_val;
		os_printk(K_INFO, "LTSSM: HOT_RST_INTR\n");
		/* Clear link error count */
		link_err_cnt = os_readl(U3D_LINK_ERR_COUNT);
		os_printk(K_INFO, "LTSSM: link_err_cnt=%x\n", link_err_cnt);
		os_writel(U3D_LINK_ERR_COUNT, CLR_LINK_ERR_CNT);

		/* Clear U1 & U2 Enable*/
		os_clrmsk(U3D_LINK_POWER_CONTROL, (SW_U1_ACCEPT_ENABLE|SW_U2_ACCEPT_ENABLE));

		musb->g.pwr_params.bU1Enabled = 0;
		musb->g.pwr_params.bU2Enabled = 0;

		/* Reset U1 & U2 timeout value*/
		timeout_val = os_readl(U3D_LINK_UX_INACT_TIMER);
		os_printk(K_INFO, "LTSSM: timer_val =%x\n", timeout_val);
		timeout_val &= ~ (U1_INACT_TIMEOUT_VALUE | DEV_U2_INACT_TIMEOUT_VALUE);
		os_writel(U3D_LINK_UX_INACT_TIMER, timeout_val);
	}

	if (dwLtssmValue & SS_INACTIVE_INTR) {
		os_printk(K_INFO, "LTSSM: SS_INACTIVE_INTR\n");
		sts_ltssm = SS_INACTIVE_INTR;
	}

	if (dwLtssmValue & RECOVERY_INTR) {
		os_printk(K_DEBUG, "LTSSM: RECOVERY_INTR\n");
		sts_ltssm = RECOVERY_INTR;
	}

	/* A completion of a Warm Reset shall result in the following.
	 * 1. A downstream port shall reset its Link Error Count.
	 * 2. Port configuration information of an upstream port shall be reset to default values.
	 *    Refer to Sections 8.4.5 and 8.4.6 for details.
	 * 3. The PHY level variables (such as Rx equalization settings) shall be reinitialized or retrained.
	 * 4. The LTSSM of a port shall transition to U0 through RxDetect and Polling.
	 */
	if (dwLtssmValue & WARM_RST_INTR) {
		DEV_INT32 link_err_cnt;
		os_printk(K_INFO, "LTSSM: WARM_RST_INTR\n");
		/* Clear link error count */
		link_err_cnt = os_readl(U3D_LINK_ERR_COUNT);
		os_printk(K_INFO, "LTSSM: link_err_cnt=%x\n", link_err_cnt);
		os_writel(U3D_LINK_ERR_COUNT, CLR_LINK_ERR_CNT);
	}

	if (dwLtssmValue & ENTER_U2_INTR) os_printk(K_DEBUG, "LTSSM: ENTER_U2_INTR\n");
	if (dwLtssmValue & ENTER_U1_INTR) os_printk(K_DEBUG, "LTSSM: ENTER_U1_INTR\n");
	if (dwLtssmValue & RXDET_SUCCESS_INTR) {
		/* Create a delayed work that runs after 1 second: if the
		 * LTSSM state is still at RxDetect by then, clear USB3_EN
		 * and set it again. */
		os_printk(K_INFO, "LTSSM: RXDET_SUCCESS_INTR\n");
		sts_ltssm = RXDET_SUCCESS_INTR;
		schedule_delayed_work_on(0, &musb->check_ltssm_work, msecs_to_jiffies(1000));
	}
}
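The RXDET_SUCCESS_INTR branch arms musb->check_ltssm_work for one second. A minimal sketch of what that handler might look like, assuming the sts_ltssm flag and the mu3d_hal_u3dev_dis()/mu3d_hal_u3dev_en() helpers used above; the 10 ms settle delay is an assumption.

static void check_ltssm_work(struct work_struct *work)
{
	/* Still stuck at RxDetect? Toggle USB3_EN to restart the LTSSM. */
	if (sts_ltssm == RXDET_SUCCESS_INTR) {
		mu3d_hal_u3dev_dis();
		mdelay(10); /* assumed settle time */
		mu3d_hal_u3dev_en();
	}
}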
Example #5
static void hotplug_decision_work_fn(struct work_struct *work)
{
	unsigned int running, disable_load, sampling_rate, enable_load;
	unsigned int avg_running = 0; /* must start at zero: summed below */
	unsigned int min_sampling_rate_jiffies;
	unsigned int online_cpus, available_cpus, i, j;
	unsigned int k;

	online_cpus = num_online_cpus();
	available_cpus = CPUS_AVAILABLE;
	disable_load = disable_load_threshold * online_cpus;
	enable_load = enable_load_threshold * online_cpus;
	min_sampling_rate_jiffies = msecs_to_jiffies(min_sampling_rate);

	/*
	 * Multiply nr_running() by 100 so we don't have to
	 * use fp division to get the average.
	 */
	running = nr_running() * 100;

	history[index] = running;

	if (debug) {
		pr_info("online_cpus is: %d\n", online_cpus);
		pr_info("enable_load is: %d\n", enable_load);
		pr_info("disable_load is: %d\n", disable_load);
		pr_info("index is: %d\n", index);
		pr_info("running is: %d\n", running);
	}

	/*
	 * Use a circular buffer to calculate the average load
	 * over the sampling periods.
	 * This will absorb load spikes of short duration where
	 * we don't want additional cores to be onlined because
	 * the cpufreq driver should take care of those load spikes.
	 */
	for (i = 0, j = index; i < SAMPLING_PERIODS; i++, j--) {
		avg_running += history[j];
		/* wrap: +1 so the loop's j-- lands on INDEX_MAX_VALUE,
		 * otherwise history[INDEX_MAX_VALUE] is never counted */
		if (unlikely(j == 0))
			j = INDEX_MAX_VALUE + 1;
	}

	/*
	 * If we are at the end of the buffer, return to the beginning.
	 */
	if (unlikely(index++ == INDEX_MAX_VALUE))
		index = 0;

	if (debug) {
		pr_info("array contents: ");
		for (k = 0; k < SAMPLING_PERIODS; k++)
			pr_info("%d: %d\t", k, history[k]);
		pr_info("\n");
		pr_info("avg_running before division: %d\n", avg_running);
	}

	avg_running = avg_running / SAMPLING_PERIODS;

	if (debug)
		pr_info("average_running is: %d\n", avg_running);

	if (likely(!(flags & HOTPLUG_DISABLED))) {
		if (unlikely((avg_running >= ENABLE_ALL_LOAD_THRESHOLD) &&
			     (online_cpus < available_cpus) &&
			     (max_online_cpus > online_cpus))) {
			if (debug)
				pr_info("auto_hotplug: Onlining all CPUs, avg running: %d\n", avg_running);
			/*
			 * Flush any delayed offlining work from the workqueue.
			 * No point in having expensive unnecessary hotplug transitions.
			 * We still online after flushing, because load is high enough to
			 * warrant it.
			 * We set the paused flag so the sampling can continue but no more
			 * hotplug events will occur.
			 */
			flags |= HOTPLUG_PAUSED;
			if (delayed_work_pending(&hotplug_offline_work))
				cancel_delayed_work(&hotplug_offline_work);
			schedule_work(&hotplug_online_all_work);
			return;
		} else if (flags & HOTPLUG_PAUSED) {
			schedule_delayed_work_on(0, &hotplug_decision_work, min_sampling_rate_jiffies);
			return;
		} else if ((avg_running >= enable_load) &&
			   (online_cpus < available_cpus) &&
			   (max_online_cpus > online_cpus)) {
			if (debug)
				pr_info("auto_hotplug: Onlining single CPU, avg running: %d\n", avg_running);
			if (delayed_work_pending(&hotplug_offline_work))
				cancel_delayed_work(&hotplug_offline_work);
			schedule_work(&hotplug_online_single_work);
			return;
		} else if ((avg_running <= disable_load) && (min_online_cpus < online_cpus)) {
			/* Only queue a cpu_down() if there isn't one already pending */
			if (!(delayed_work_pending(&hotplug_offline_work))) {
				if (online_cpus == 2 && avg_running < (disable_load / 2)) {
					if (debug)
						pr_info("auto_hotplug: Online CPUs = 2; Offlining CPU, avg running: %d\n", avg_running);
					flags |= HOTPLUG_PAUSED;
					schedule_delayed_work_on(0, &hotplug_offline_work, min_sampling_rate_jiffies);
				} else if (online_cpus > 2) {
					if (debug)
						pr_info("auto_hotplug: Offlining CPU, avg running: %d\n", avg_running);
					schedule_delayed_work_on(0, &hotplug_offline_work, HZ);
				}
			}
			/* If boostpulse is active, clear the flags */
			if (flags & BOOSTPULSE_ACTIVE) {
				flags &= ~BOOSTPULSE_ACTIVE;
				if (debug) {
				pr_info("auto_hotplug: Clearing boostpulse flags\n");
				}
			}
		}
	}

	/*
	 * Reduce the sampling rate dynamically based on online cpus.
	 */
	sampling_rate = min_sampling_rate_jiffies * (online_cpus * online_cpus);
	if (debug)
		pr_info("sampling_rate is: %d\n", jiffies_to_msecs(sampling_rate));
	schedule_delayed_work_on(0, &hotplug_decision_work, sampling_rate);

}
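hotplug_decision_work_fn() defers the actual onlining to hotplug_online_all_work. Below is a hedged sketch of that handler under the same flags convention; clearing HOTPLUG_PAUSED directly here and the one-jiffy-second resume delay are assumptions, not the original implementation.

static void hotplug_online_all_work_fn(struct work_struct *work)
{
	int cpu;

	/* cpu_up() just returns an error for CPUs that are already online */
	for_each_possible_cpu(cpu) {
		if (likely(!cpu_online(cpu)))
			cpu_up(cpu);
	}
	/* assumption: unpause immediately and resume sampling */
	flags &= ~HOTPLUG_PAUSED;
	schedule_delayed_work_on(0, &hotplug_decision_work, HZ);
}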
static void __cpuinit check_tempk(struct work_struct *work)
{
	unsigned int new_freq;
	struct tsens_device tsens_dev;
	long temp = 0;
	int ret = 0;
	
	tsens_dev.sensor_num = kmsm_thermal_info.sensor_id;
	ret = tsens_get_temp(&tsens_dev, &temp);
	if (ret) {
		pr_debug("%s: Unable to read TSENS sensor %d\n",
				KBUILD_MODNAME, tsens_dev.sensor_num);
		goto reschedule;
	}
	//pr_alert("CHECK TEMP %lu-%d-%d\n", temp, kmsm_thermal_info.temp_limit_degC_start, kmsm_thermal_info.temp_limit_degC_stop);
	kmsm_thermal_info.current_temp = temp;
	
	if (temp >= kmsm_thermal_info.temp_limit_degC_start)
	{
		unsigned int i;
		if (!kmsm_thermal_info.isthrottling)
		{
			thermal_get_freq_table();
			pr_alert("START KTHROTTLING - current temp = %ld - set point = %d\n",
				 temp, kmsm_thermal_info.temp_limit_degC_start);
		}
		kmsm_thermal_info.isthrottling = 1;
		limit_idx -= kmsm_thermal_info.freq_steps_while_throttling;
		if (limit_idx < limit_idx_low)
			limit_idx = limit_idx_low;
		for (i = 0; i < num_online_cpus(); i++)
		{
			//pr_alert("KTHROTTLING LOOP - current temp = %lu - set point = %d\n", temp, kmsm_thermal_info.temp_limit_degC_start);
			if (cpu_online(i) && cpufreq_get(i) != table[limit_idx].frequency)
			{
				//pr_alert("KTHROTTLING LOOP IN IF - current temp = %lu - set point = %d\n", temp, kmsm_thermal_info.temp_limit_degC_start);
				//policy = NULL;
				//policy = cpufreq_cpu_get(i);
				//if (policy != NULL)
				//	__cpufreq_driver_target(policy, 1296000, CPUFREQ_RELATION_H);
				new_freq = table[limit_idx].frequency;
				do_kthermal(i, new_freq);
			}
		}
	}
	else if (kmsm_thermal_info.isthrottling && temp > kmsm_thermal_info.temp_limit_degC_stop && temp < kmsm_thermal_info.temp_limit_degC_start)
	{
		unsigned int i;
		for (i = 0; i < num_online_cpus(); i++)
		{
			if (cpu_online(i) && cpufreq_get(i) != table[limit_idx].frequency)
			{
				new_freq = table[limit_idx].frequency;
				do_kthermal(i, new_freq);
			}
		}
	}
	else if (kmsm_thermal_info.isthrottling && temp <= kmsm_thermal_info.temp_limit_degC_stop)
	{
		unsigned int i;
		bool stopThrottle = false;
		limit_idx += kmsm_thermal_info.freq_steps_while_throttling;
		if (limit_idx >= limit_idx_high)
		{
			limit_idx = limit_idx_high;
			kmsm_thermal_info.isthrottling = 0;
			stopThrottle = true;
			pr_alert("STOP KTHROTTLING - current temp = %lu\n", temp);
		}
		for (i = 0; i < num_online_cpus(); i++)
		{
			if (cpu_online(i))
			{
				new_freq = table[limit_idx].frequency;
				do_kthermal(i, new_freq);
			}
		}
		if (stopThrottle)
			do_kthermal(0, 0);
	}

reschedule:
	schedule_delayed_work_on(0, &check_temp_workk,
			msecs_to_jiffies(kmsm_thermal_info.poll_speed));
}
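check_tempk() assumes check_temp_workk has already been bound to it. A wiring sketch of the module init that might do so; the init function name is hypothetical.

static int __init kmsm_thermal_init(void)
{
	INIT_DELAYED_WORK(&check_temp_workk, check_tempk);
	/* first poll after one poll interval */
	schedule_delayed_work_on(0, &check_temp_workk,
			msecs_to_jiffies(kmsm_thermal_info.poll_speed));
	return 0;
}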
Example #7
static void bq24196_irq_handler(void)
{
	printk("Enter %s\n", __func__);

	schedule_delayed_work_on(0, &bq_data->irq_dwork, 0);
}
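bq24196_irq_handler() just defers to irq_dwork, so it is presumably called from an interrupt-context wrapper. A hedged sketch of that wiring; the GPIO parameter, the thin irqreturn_t wrapper and the falling-edge trigger are assumptions, not the original driver's code.

static irqreturn_t bq24196_irq_thread(int irq, void *dev_id)
{
	bq24196_irq_handler();	/* kicks irq_dwork with zero delay */
	return IRQ_HANDLED;
}

static int bq24196_setup_irq(int gpio)
{
	int irq = gpio_to_irq(gpio);

	if (irq < 0)
		return irq;
	return request_threaded_irq(irq, NULL, bq24196_irq_thread,
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "bq24196_irq", NULL);
}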
Example #8
static void max77665_poll_work_func(struct work_struct *work)
{
	struct max77665_charger *charger =
		container_of(work, struct max77665_charger, poll_dwork.work);
	struct power_supply *fuelgauge_ps
		= power_supply_get_by_name("fuelgauge");
	union power_supply_propval val;
	int battery_health = POWER_SUPPLY_HEALTH_GOOD;

	mutex_lock(&charger->mutex_t);

	if (fuelgauge_ps &&
	    fuelgauge_ps->get_property(fuelgauge_ps,
				POWER_SUPPLY_PROP_HEALTH, &val) == 0)
		battery_health = val.intval;

	if (charger->chg_status == CHG_STATUS_FAST ||
			charger->chg_status == CHG_STATUS_RECHG) {
		struct i2c_client *i2c = charger->iodev->i2c;
		u8 reg_data;

		if(max77665_read_reg(i2c, MAX77665_CHG_REG_CHG_DETAILS_01, &reg_data) >= 0) {
			if((reg_data & 0x0F) == 0x04) {
				charger->chg_status = CHG_STATUS_DONE;
			}
		}
	}

	if (charger->cable_status == CABLE_TYPE_USB ||
			charger->cable_status == CABLE_TYPE_AC) {

		if (battery_health != POWER_SUPPLY_HEALTH_GOOD) {
			if (regulator_is_enabled(charger->ps)) {
				printk("battery unhealthy, disable charging\n");
				regulator_disable(charger->ps);
			}
		} else {
			if (regulator_is_enabled(charger->ps)) {
				if (charger->chg_status == CHG_STATUS_DONE && fuelgauge_ps) {
					int soc = 100;
					if (fuelgauge_ps->get_property(fuelgauge_ps, POWER_SUPPLY_PROP_CAPACITY, &val) == 0)
						soc = val.intval;
					if (soc <= 98) {
						regulator_disable(charger->ps);
						msleep(500);
						regulator_enable(charger->ps);
						charger->chg_status = CHG_STATUS_RECHG;
					}
				}
			} else {
				printk("----------battery healthy good, enable charging\n");
				regulator_enable(charger->ps);
			}
		}

		schedule_delayed_work_on(0, &charger->poll_dwork, TEMP_CHECK_DELAY);
	} else {
		if (regulator_is_enabled(charger->ps)) {
			printk("--------------charger remove, disable charging\n");
			regulator_disable(charger->ps);
		}
	}
	mutex_unlock(&charger->mutex_t);
}
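The poll work re-arms itself only while a USB or AC cable is present, so something must restart it on attach. A sketch of that path, assuming a notifier-style callback; the function name and signature are made up.

static void max77665_cable_attached(struct max77665_charger *charger,
				    int cable_type)
{
	charger->cable_status = cable_type;
	/* restart polling immediately on a cable event */
	cancel_delayed_work_sync(&charger->poll_dwork);
	schedule_delayed_work_on(0, &charger->poll_dwork, 0);
}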
static inline void dbs_timer_init(struct greenmax_info_s *this_greenmax) {
	int delay = get_timer_delay();

	INIT_DEFERRABLE_WORK(&this_greenmax->work, do_dbs_timer);
	schedule_delayed_work_on(this_greenmax->cpu, &this_greenmax->work, delay);
}
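A teardown counterpart to dbs_timer_init() is implied; a minimal sketch, assuming the usual cpufreq-governor pattern.

static inline void dbs_timer_exit(struct greenmax_info_s *this_greenmax)
{
	/* wait for any in-flight sample before tearing the governor down */
	cancel_delayed_work_sync(&this_greenmax->work);
}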
Example #10
static void __cpuinit intelli_plug_work_fn(struct work_struct *work)
{
	unsigned int nr_run_stat;

	if (intelli_plug_active == 1) {
		nr_run_stat = calculate_thread_stats();
		//pr_info("nr_run_stat: %u\n", nr_run_stat);

		if (!suspended) {
			switch (nr_run_stat) {
				case 1:
					if (persist_count > 0)
						persist_count--;
					if (num_online_cpus() == 2 && persist_count == 0)
						cpu_down(1);
					if (eco_mode_active) {
						cpu_down(3);
						cpu_down(2);
					}
					//pr_info("case 1: %u\n", persist_count);
					break;
				case 2:
					persist_count = DUAL_CORE_PERSISTENCE;
					if (num_online_cpus() == 1)
						cpu_up(1);
					else {
						cpu_down(3);
						cpu_down(2);
					}
					//pr_info("case 2: %u\n", persist_count);
					break;
				case 3:
					persist_count = TRI_CORE_PERSISTENCE;
					if (num_online_cpus() == 1)
						cpu_up(1);
					if (num_online_cpus() == 2)
						cpu_up(2);
					else
						cpu_down(3);
					//pr_info("case 3: %u\n", persist_count);
					break;
				case 4:
					persist_count = QUAD_CORE_PERSISTENCE;
					if (num_online_cpus() == 1)
						cpu_up(1);
					if (num_online_cpus() == 2)
						cpu_up(2);
					if (num_online_cpus() == 3)
						cpu_up(3);
					//pr_info("case 4: %u\n", persist_count);
					break;
				default:
					pr_err("Run Stat Error: Bad value %u\n", nr_run_stat);
					break;
			}
		} //else
			//pr_info("intelli_plug is suspened!\n");
	}
	schedule_delayed_work_on(0, &intelli_plug_work,
		msecs_to_jiffies(DEF_SAMPLING_MS));
}
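For completeness, a hedged sketch of the resume side this variant implies: clear suspended and let the worker rebalance. The handler name and the immediate cpu_up(1) are assumptions.

static void intelli_plug_late_resume(struct early_suspend *handler)
{
	suspended = 0;
	/* bring a second core up right away; the worker rebalances later */
	cpu_up(1);
	schedule_delayed_work_on(0, &intelli_plug_work,
		msecs_to_jiffies(DEF_SAMPLING_MS));
}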