static void usb_load(struct work_struct *work)
{
	int cpu;
	unsigned int num_irqs = 0;
	static unsigned int old_num_irqs = UINT_MAX;

	for_each_online_cpu(cpu)
		num_irqs += kstat_irqs_cpu(IRQ_DB8500_USBOTG, cpu);

	if ((num_irqs > old_num_irqs) &&
	    (num_irqs - old_num_irqs) > USB_LIMIT) {
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
					     "usb", 125);
		if (!usb_pm_qos_is_latency_0) {
			usb_pm_qos_latency =
			pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 0);
			usb_pm_qos_is_latency_0 = true;
		}
	} else {
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
						"usb", 25);
		if (usb_pm_qos_is_latency_0) {
			pm_qos_remove_request(usb_pm_qos_latency);
			usb_pm_qos_is_latency_0 = false;
		}
	}
	old_num_irqs = num_irqs;

	schedule_delayed_work_on(0,
				 &work_usb_workaround,
				 msecs_to_jiffies(USB_PROBE_DELAY));
}
/* Exemplo n.º 2 */
/*
 * cpufreq_min_limit_store() - sysfs store handler for the CPU min-freq lock.
 * @kobj: sysfs kobject (unused).
 * @attr: sysfs attribute (unused).
 * @buf:  user input; a decimal frequency value, or -1 to unlock.
 * @n:    length of @buf.
 *
 * Writing -1 clears the min lock and restores the default PRCMU ARM-kHz
 * QoS requirement.  Writing a valid frequency installs a min lock; if a
 * max lock is active and lower than the requested min, the max lock wins
 * and min_replacement is set so the min lock can be re-applied once the
 * max lock is released.
 *
 * Returns @n on success (ignored/invalid lock requests only log an error),
 * or -EINVAL if the input cannot be parsed.
 */
static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	ssize_t ret = -EINVAL;

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	if (val == -1) { /* Unlock request */
		if (cpufreq_min_limit_val != -1) {
			/* Reset lock value to default */
			cpufreq_min_limit_val = -1;

			/* Update PRCMU QOS value to default */
			prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					"power", PRCMU_QOS_DEFAULT_VALUE);

			/* Clear replacement flag */
			min_replacement = 0;
		} else /* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
	} else { /* Lock request */
		if (get_cpufreq_level((unsigned int)val, &cpufreq_level,
				      DVFS_MIN_LOCK_REQ) == VALID_LEVEL) {
			cpufreq_min_limit_val = val;

			/* Max lock has higher priority than Min lock */
			if (cpufreq_max_limit_val != -1 &&
			    cpufreq_min_limit_val > cpufreq_max_limit_val) {
				printk(KERN_ERR "%s: Min lock forced to %d"
					" because of Max lock\n",
					__func__, cpufreq_max_limit_val);
				/* Update PRCMU QOS value to max value */
				prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
						"power", cpufreq_max_limit_val);
				/* Set replacement flag */
				min_replacement = 1;
			} else {
				/* Update PRCMU QOS value to new value */
				prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
						"power", cpufreq_min_limit_val);
			}
		} else /* Invalid lock request --> No action */
			printk(KERN_ERR "%s: Lock request is invalid\n",
				__func__);
	}

	ret = n;
out:
	return ret;
}
/* Exemplo n.º 3 */
static void stm_prcmu_qos_work(struct work_struct *work)
{
	if (musb_qos_req == 100) {
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
			"musb_qos", 100);
	} else {
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
			"musb_qos", 50);
		prcmu_release_usb_wakeup_state();
	}
}
/*
 * update_mcde_opp() - adjust APE/DDR OPP QoS for the MCDE display.
 * @dev:  MCDE device; its name is used as the QoS requirement tag.
 * @reqs: current MCDE requirements (rotation channels, overlay count).
 *
 * Requests max APE/DDR OPP while rotation with more than one overlay is
 * active, and keeps the boost for 5 s after the last rotation-channel
 * change to avoid clocking down too eagerly.  On the very first request
 * change the backlight is power-cycled once (board-specific workaround —
 * presumably a Codina display quirk; confirm against board code).
 */
static void update_mcde_opp(struct device *dev,
					struct mcde_opp_requirements *reqs)
{
	static s32 requested_qos;
	static bool update_first = true;
	static u8 prev_rot_channels;
	static ktime_t rot_time;
	s32 req_ape = PRCMU_QOS_DEFAULT_VALUE;
	s32 req_ddr = PRCMU_QOS_DEFAULT_VALUE;
	s64 diff;

	/* If a rotation is detected, clock up CPU to max */
	if (reqs->num_rot_channels != prev_rot_channels) {
		prev_rot_channels = reqs->num_rot_channels;
		rot_time = ktime_get();
	}

	/* Milliseconds elapsed since the last rotation-channel change. */
	diff = ktime_to_ms(ktime_sub(ktime_get(), rot_time));

	/*
	 * Wait a while (5 s) before clocking down again,
	 * unless we have an overlay.
	 */
	if ((reqs->num_rot_channels && reqs->num_overlays > 1) ||
	    (diff < 5000)) {
		req_ape = PRCMU_QOS_MAX_VALUE;
		req_ddr = PRCMU_QOS_MAX_VALUE;
	}

	/* Only talk to the PRCMU when the APE request actually changes. */
	if (req_ape != requested_qos) {
		requested_qos = req_ape;
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
						dev_name(dev), req_ape);
		prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP,
						dev_name(dev), req_ddr);
		pr_info("Requested APE QOS = %d\n", req_ape);

		if (update_first) {
			/* One-shot backlight cycle on the first update. */
			codina_backlight_on_off(false);
			msleep(1);
			codina_backlight_on_off(true);
			update_first = false;
		}
	}
}
/*
 * ux500_msp_dai_shutdown() - shut down an MSP I2S stream.
 * @substream: the PCM substream being closed.
 * @dai:       the MSP DAI.
 *
 * Drops the 100% APE OPP constraint if one was taken in prepare, closes
 * the I2S port for the stream's direction, then releases the clocks and
 * the Vape regulator.
 */
static void ux500_msp_dai_shutdown(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	int ret;
	struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev);
	bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);

	dev_dbg(dai->dev, "%s: MSP %d (%s): Enter.\n", __func__, dai->id,
		snd_pcm_stream_str(substream));

	if (drvdata->vape_opp_constraint == 1) {
		/*
		 * Use the same QoS client name as ux500_msp_dai_prepare()
		 * ("ux500-msp-i2s").  The previous "ux500_msp_i2s" spelling
		 * addressed a different requirement and so never released
		 * the 100% OPP constraint taken in prepare.
		 */
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					"ux500-msp-i2s", 50);
		drvdata->vape_opp_constraint = 0;
	}

	if (ux500_msp_i2s_close(drvdata->msp,
				is_playback ? MSP_DIR_TX : MSP_DIR_RX)) {
		dev_err(dai->dev,
			"%s: Error: MSP %d (%s): Unable to close i2s.\n",
			__func__, dai->id, snd_pcm_stream_str(substream));
	}

	/* Disable and unprepare clocks */
	clk_disable_unprepare(drvdata->clk);
	clk_disable_unprepare(drvdata->pclk);

	/* Disable regulator */
	ret = regulator_disable(drvdata->reg_vape);
	if (ret < 0)
		dev_err(dai->dev,
			"%s: ERROR: Failed to disable regulator (%d)!\n",
			__func__, ret);
}
/* Exemplo n.º 6 */
static void update_mcde_opp(struct device *dev,
					struct mcde_opp_requirements *reqs)
{
	s32 req_ape = PRCMU_QOS_DEFAULT_VALUE;

	if (reqs->num_rot_channels && reqs->num_overlays > 1)
		req_ape = PRCMU_QOS_MAX_VALUE;

	prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, dev_name(dev), req_ape);
}
/* Exemplo n.º 7 */
static int ux500_msp_dai_prepare(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	int ret = 0;
	struct ux500_msp_i2s_drvdata *drvdata = dev_get_drvdata(dai->dev);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ux500_msp_config msp_config;

	dev_dbg(dai->dev, "%s: MSP %d (%s): Enter (rate = %d).\n", __func__,
		dai->id, snd_pcm_stream_str(substream), runtime->rate);

	setup_msp_config(substream, dai, &msp_config);

	ret = ux500_msp_i2s_open(drvdata->msp, &msp_config);
	if (ret < 0) {
		dev_err(dai->dev, "%s: Error: msp_setup failed (ret = %d)!\n",
			__func__, ret);
		return ret;
	}

	/* Set OPP-level */
	if ((drvdata->fmt & SND_SOC_DAIFMT_MASTER_MASK) &&
		(drvdata->msp->f_bitclk > 19200000)) {
		/* If the bit-clock is higher than 19.2MHz, Vape should be
		 * run in 100% OPP. Only when bit-clock is used (MSP master)
		 */
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					"ux500-msp-i2s", 100);
		drvdata->vape_opp_constraint = 1;
	} else {
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					"ux500-msp-i2s", 50);
		drvdata->vape_opp_constraint = 0;
	}

	return ret;
}
/* Exemplo n.º 8 */
/*
 * Periodic work: sample the WLAN (SDMMC1) interrupt rate.  While traffic
 * is heavy, request a higher ARM frequency (if configured), max APE/DDR
 * OPP and a zero-wakeup-latency PM QoS; drop all of it again when the
 * interrupt rate falls off.
 */
static void wlan_load(struct work_struct *work)
{
	int cpu;
	unsigned int num_irqs = 0;
	static unsigned int old_num_irqs = UINT_MAX;

	for_each_online_cpu(cpu)
		num_irqs += kstat_irqs_cpu(IRQ_DB8500_SDMMC1, cpu);

	if ((num_irqs > old_num_irqs) &&
	    (num_irqs - old_num_irqs) > wlan_limit) {
		/* Only request an ARM freq boost when one is configured. */
		if (wlan_arm_khz) {
			prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
						     "wlan",
						     wlan_arm_khz);
		}
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					     "wlan",
					     PRCMU_QOS_MAX_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP,
					     "wlan",
					     PRCMU_QOS_MAX_VALUE);
		if (!wlan_pm_qos_is_latency_0) {
			/*
			 * The wake up latency is set to 0 to prevent
			 * the system from going to sleep. This improves
			 * the wlan throughput in DMA mode.
			 * The wake up latency from sleep adds ~5% overhead
			 * for TX in some cases.
			 * This change doesn't increase performance for wlan
			 * PIO since the CPU usage prevents sleep in this mode.
			 */
			pm_qos_add_request(&wlan_pm_qos_latency,
					   PM_QOS_CPU_DMA_LATENCY, 0);
			wlan_pm_qos_is_latency_0 = true;
		}
	} else {
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "wlan",
					     PRCMU_QOS_DEFAULT_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					     "wlan",
					     PRCMU_QOS_DEFAULT_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP,
					     "wlan",
					     PRCMU_QOS_DEFAULT_VALUE);
		if (wlan_pm_qos_is_latency_0) {
			pm_qos_remove_request(&wlan_pm_qos_latency);
			wlan_pm_qos_is_latency_0 = false;
		}
	}

	old_num_irqs = num_irqs;

	/* Re-arm ourselves on CPU0 for the next sampling period. */
	schedule_delayed_work_on(0,
				 &work_wlan_workaround,
				 msecs_to_jiffies(wlan_probe_delay));
}
/* Exemplo n.º 9 */
/*
 * Request max APE OPP while rotation with more than one overlay is active;
 * otherwise fall back to the default.  Only talks to the PRCMU (and logs)
 * when the request actually changes.
 */
static void update_mcde_opp(struct device *dev,
					struct mcde_opp_requirements *reqs)
{
	static s32 requested_qos;
	s32 want = (reqs->num_rot_channels && reqs->num_overlays > 1) ?
			PRCMU_QOS_MAX_VALUE : PRCMU_QOS_DEFAULT_VALUE;

	if (want == requested_qos)
		return;

	requested_qos = want;
	prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					dev_name(dev), want);
	pr_info("Requested APE QOS = %d\n", want);
}
/* Exemplo n.º 10 */
/*
 * Request max APE OPP while rotation with more than one overlay is active;
 * otherwise fall back to the default.  Skips the PRCMU call entirely when
 * the request has not changed since last time.
 */
static void update_mcde_opp(struct device *dev,
					struct mcde_opp_requirements *reqs)
{
	static s32 curr_reqed;
	s32 new_req = (reqs->num_rot_channels && (reqs->num_overlays > 1)) ?
			PRCMU_QOS_MAX_VALUE : PRCMU_QOS_DEFAULT_VALUE;

	if (new_req == curr_reqed)
		return;

	prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, dev_name(dev), new_req);

	dev_dbg(dev, "Requested APE QOS update to %d\n", new_req);
	curr_reqed = new_req;
}
/*
 * musb_platform_device_en() - enable or disable the MUSB device PHY.
 * @enable: 1 to enable the PHY, 0 to disable it.
 *
 * Enabling when the PHY is off delegates to usb_device_phy_en(); disabling
 * when the PHY is on tears down the AB8500 PHY, QoS requirement, regulators
 * and clock in order.  Requests matching the current state are no-ops.
 */
void musb_platform_device_en(int enable)
{
	int ret;

	if (enable == 1) {
		if (phy_enable_stat == USB_DISABLE) {
			stm_musb_curr_state = USB_DEVICE;
			usb_device_phy_en(USB_ENABLE);
		}
		/* else: PHY already enabled, nothing to do. */
		return;
	}

	if (enable == 0) {
		if (phy_enable_stat == USB_DISABLE)
			return;	/* PHY already disabled, nothing to do. */

		/* PHY enabled: disable it. */
		abx500_set_register_interruptible(device,
			AB8500_USB,
			AB8500_USB_PHY_CTRL_REG,
			AB8500_USB_DEVICE_DISABLE);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
			DEVICE_NAME, 50);
		prcmu_release_usb_wakeup_state();
		regulator_disable(musb_vape_supply);
		regulator_disable(musb_vintcore_supply);
		regulator_set_optimum_mode(musb_vintcore_supply, 0);
		ret = regulator_set_voltage(musb_vintcore_supply,
					    0, 1350000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set the Vintcore"
					" to 0 V .. 1.35 V, ret=%d\n",
					ret);
		regulator_disable(musb_smps2_supply);
		clk_disable(sysclock);
		phy_enable_stat = USB_DISABLE;
	}
}
/*
 * update_mcde_opp() - adjust the APE OPP QoS for the MCDE display.
 * @dev:  MCDE device; its name is used as the QoS requirement tag.
 * @reqs: current MCDE requirements (rotation channels, overlay count).
 *
 * Requests max APE OPP while rotation with more than one overlay is
 * active, the default OPP otherwise; only talks to the PRCMU (and logs)
 * when the request actually changes.
 */
static void update_mcde_opp(struct device *dev,
					struct mcde_opp_requirements *reqs)
{
	static s32 requested_qos;
	s32 req_ape = PRCMU_QOS_DEFAULT_VALUE;

	if (reqs->num_rot_channels && reqs->num_overlays > 1)
		req_ape = PRCMU_QOS_MAX_VALUE;

	if (req_ape != requested_qos) {
		requested_qos = req_ape;
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
						dev_name(dev), req_ape);
		pr_info("Requested APE QOS = %d\n", req_ape);
	}
}
/* Exemplo n.º 13 */
/*
 * set_cpu_config() - apply a ux500 use-case configuration to the system.
 * @new_uc: the use case to switch to.
 *
 * Skips all work unless the use case changed, or the user configuration
 * was updated while in UX500_UC_USER.  Otherwise applies, in order: CPU
 * hotplug, cpufreq min/max limits, the "usecase" ARM-kHz QoS requirement,
 * cpuidle multiplier, L2 prefetch, a forced cpuidle state and the voice
 * call QoS override.  The ordering of these steps is deliberate — do not
 * reorder without checking the dependencies between them.
 */
static void set_cpu_config(enum ux500_uc new_uc)
{
	bool update = false;
	int cpu;
	int min_freq, max_freq;

	/* Only reconfigure when something actually changed. */
	if (new_uc != current_uc)
		update = true;
	else if ((user_config_updated) && (new_uc == UX500_UC_USER))
		update = true;

	pr_debug("%s: new_usecase=%d, current_usecase=%d, update=%d\n",
		__func__, new_uc, current_uc, update);

	if (!update)
		goto exit;

	/* Cpu hotplug */
	if (!(usecase_conf[new_uc].second_cpu_online) &&
	    (num_online_cpus() > 1))
		cpu_down(1);
	else if ((usecase_conf[new_uc].second_cpu_online) &&
		 (num_online_cpus() < 2))
		cpu_up(1);

	/* A zero max_arm/min_arm in the config means "use the system limit". */
	if (usecase_conf[new_uc].max_arm)
		max_freq = usecase_conf[new_uc].max_arm;
	else
		max_freq = system_max_freq;

	if (usecase_conf[new_uc].min_arm)
		min_freq = usecase_conf[new_uc].min_arm;
	else
		min_freq = system_min_freq;

	for_each_online_cpu(cpu)
		set_cpufreq(cpu,
			    min_freq,
			    max_freq);

	/* Kinda doing the job twice, but this is needed for reference keeping */
	if (usecase_conf[new_uc].min_arm)
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "usecase",
					     usecase_conf[new_uc].min_arm);
	else
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "usecase",
					     PRCMU_QOS_DEFAULT_VALUE);

	/* Cpu idle */
	cpuidle_set_multiplier(usecase_conf[new_uc].cpuidle_multiplier);

	/* L2 prefetch */
	if (usecase_conf[new_uc].l2_prefetch_en)
		outer_prefetch_enable();
	else
		outer_prefetch_disable();

	/* Force cpuidle state */
	cpuidle_force_state(usecase_conf[new_uc].forced_state);

	/* QOS override */
	prcmu_qos_voice_call_override(usecase_conf[new_uc].vc_override);

	current_uc = new_uc;

exit:
	/* Its ok to clear even if new_uc != UX500_UC_USER */
	user_config_updated = false;
}
/* Exemplo n.º 14 */
/*
 * Sample sectors read and written to any MMC devices, update PRCMU
 * qos requirement
 */
static void mmc_load(struct work_struct *work)
{
	static unsigned long long old_sectors_read[PERF_MMC_HOSTS];
	static unsigned long long old_sectors_written[PERF_MMC_HOSTS];
	static struct gendisk *mmc_disks[PERF_MMC_HOSTS];
	static int cycle, nrdisk;
	static bool old_mode;
	unsigned long long sectors;
	bool new_mode = false;
	int i;
	long dr, dw;

	/* Periodically rescan for MMC disks (hotplug). */
	if (!cycle) {
		memset(&mmc_disks, 0, sizeof(mmc_disks));
		nrdisk = scan_mmc_devices(mmc_disks);
		cycle = perf_mmc_rescan_cycles;
	}
	cycle--;

	for (i = 0; i < nrdisk; i++) {
		/* Read-side delta (sectors read this sampling period). */
		sectors = part_stat_read(&(mmc_disks[i]->part0),
						sectors[READ]);

		dr = sectors - old_sectors_read[i];
		if (dr < 0)
			dr = 0;
		if (old_sectors_read[i] &&
			dr > perf_mmc_limit_read*perf_mmc_probe_delay/1000)
			new_mode = true;

		old_sectors_read[i] = sectors;

		/* Write-side delta (sectors written this sampling period). */
		sectors = part_stat_read(&(mmc_disks[i]->part0),
						sectors[WRITE]);

		/*
		 * Compare against the previous WRITE counter; the old code
		 * subtracted old_sectors_read[i] here, which made dw (and
		 * the combined check below) bogus.
		 */
		dw = sectors - old_sectors_written[i];
		if (dw < 0)
			dw = 0;
		if (old_sectors_written[i] &&
			dw > perf_mmc_limit_write*perf_mmc_probe_delay/1000)
			new_mode = true;

		old_sectors_written[i] = sectors;

		if (dr + dw > perf_mmc_limit_combined*perf_mmc_probe_delay/1000)
			new_mode = true;
	}

	/* Transition idle -> busy: raise QoS requirements. */
	if (!old_mode && new_mode) {
		if (perf_mmc_arm_khz)
			prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
						     "mmc",
						     perf_mmc_arm_khz);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					     "mmc",
					     PRCMU_QOS_MAX_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP,
					     "mmc",
					     PRCMU_QOS_MAX_VALUE);
	}

	/* Transition busy -> idle: restore defaults. */
	if (old_mode && !new_mode) {
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "mmc",
					     PRCMU_QOS_DEFAULT_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
					     "mmc",
					     PRCMU_QOS_DEFAULT_VALUE);
		prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP,
					     "mmc",
					     PRCMU_QOS_DEFAULT_VALUE);
	}

	old_mode = new_mode;

	/* Re-arm the sampling work. */
	schedule_delayed_work(&work_mmc,
				 msecs_to_jiffies(perf_mmc_probe_delay));
}
/* boost switching logic:
 * - boost_working means that the freq is already set to the high value
 * - boost_scheduled means that a job is scheduled to turn boost either on or off
 * - boost_required is a flag for the scheduled job telling it what to do with boost
 *
 * if we are in APE_50_OPP, skip boost
 * if we are in APE_100_OPP and util > boost_up_thresh, schedule boost if it is not
 * on, or - if it is on and scheduled to be turned off - cancel that schedule
 * if boost is scheduled and not yet working and util < util_high_to_low, then cancel the scheduled boost
 * if boost is on and util < boost_down_thresh, schedule boost to be turned off
 */
/*
 * mali_utilization_function() - react to the latest Mali GPU utilization.
 *
 * Runs under mali_boost_lock.  Two cooperating mechanisms:
 * 1) APE/DDR OPP requests ("mali" QoS client) raised when utilization
 *    crosses the low-to-high threshold and dropped below high-to-low.
 * 2) The boost state machine (see the comment block above this function),
 *    considered only while we hold the high OPP request and boost_enable
 *    is set.
 */
void mali_utilization_function(struct work_struct *ptr)
{
	/* By default, the platform starts with 50% APE OPP and 25% DDR OPP. */
	static u32 has_requested_low = 1;

	MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u\n", mali_last_utilization));

	mutex_lock(&mali_boost_lock);
	if ((!boost_required && !boost_working && !boost_scheduled) || !boost_enable) {
		// consider power saving mode (APE_50_OPP) only if we're not on boost
		int ape_opp = prcmu_get_ape_opp();
		/*
		 * If APE_OPP==100 because someone else wanted it (touchboost in
		 * particular) and not mali, check whether utilization is high
		 * enough that we should also request high APE, to maintain
		 * satisfying UI performance.
		 */
		int up_threshold = (ape_opp == APE_50_OPP && has_requested_low ? mali_utilization_low_to_high : (mali_utilization_high_to_low));

		if (mali_last_utilization >= up_threshold) {
			if (has_requested_low) {
				MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u SIGNAL_HIGH\n", mali_last_utilization));
				/* Request 100% APE_OPP. */
				prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "mali", PRCMU_QOS_MAX_VALUE);
				/*
				 * Since the utilization values will be reported higher
				 * if DDR_OPP is lowered, we also request 100% DDR_OPP.
				 */
				prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "mali", PRCMU_QOS_MAX_VALUE);
				has_requested_low = 0;
				mutex_unlock(&mali_boost_lock);
				return;		// after switching to APE_100_OPP, measure utilization once more before entering the boost logic
			}
		} else {
			if (mali_last_utilization < mali_utilization_high_to_low) {
				if (!has_requested_low) {
					/* Remove the APE_OPP and DDR_OPP requests. */
					prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "mali", PRCMU_QOS_DEFAULT_VALUE);
					prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "mali", PRCMU_QOS_DEFAULT_VALUE);
					MALI_DEBUG_PRINT(5, ("MALI GPU utilization: %u SIGNAL_LOW\n", mali_last_utilization));
					has_requested_low = 1;
				}
			}
		}
	}

	if (!has_requested_low && boost_enable) {
		// consider boost only if we are in APE_100_OPP mode
		if (!boost_required && mali_last_utilization > boost_upthreshold) {
			boost_required = true;
			if (!boost_scheduled) {
				// schedule a job to turn boost on
				boost_scheduled = true;
				schedule_delayed_work(&mali_boost_delayedwork, msecs_to_jiffies(boost_delay));
			} else {
				// cancel the job meant to turn boost off
				boost_scheduled = false;
				cancel_delayed_work(&mali_boost_delayedwork);
			}
		} else if (boost_required && !boost_working && mali_last_utilization < boost_downthreshold) {
			boost_required = false;
			if (boost_scheduled) {
				// not working yet but scheduled to be turned on: cancel the scheduled job
				cancel_delayed_work(&mali_boost_delayedwork);
				boost_scheduled = false;
			}
		} else if (boost_working && mali_last_utilization < boost_downthreshold) {
			boost_required = false;
			if (!boost_scheduled) {
				// boost is on and not yet scheduled to be turned off: schedule it
				boost_scheduled = true;
				schedule_delayed_work(&mali_boost_delayedwork, msecs_to_jiffies(boost_delay));
			}
		}
	}
	mutex_unlock(&mali_boost_lock);

}
/**
 * usb_device_phy_en() - power the USB PHY up or down for gadget mode
 * @enable: USB_ENABLE to power up the PHY, USB_DISABLE to power it down.
 *
 * On enable: takes a wakelock, enables pins/clock/regulators, programs the
 * Vintcore LDO to 1.3 V, schedules the USB performance-workaround work,
 * raises the APE OPP QoS to 100% and finally enables the AB8500 PHY.
 * On disable: forces a disconnect if needed, disables the PHY (using the
 * ABB cut2.0 bug31952 enable-write-before-disable workaround), drops the
 * QoS requirements and releases all resources in reverse order.
 * No-op when the PHY is already in the requested state.  The enable and
 * disable sequences are order-dependent; do not reorder the calls.
 */
static void usb_device_phy_en(int enable)
{
	int volt = 0;
	int ret = -1;

	/* Already in the requested state: nothing to do. */
	if (phy_enable_stat == enable)
		return;

	if (enable == USB_ENABLE) {
		wake_lock(&ab8500_musb_wakelock);
		ux500_pins_enable(usb_gpio_pins);
		clk_enable(sysclock);
		phy_enable_stat = USB_ENABLE;
		regulator_enable(musb_vape_supply);
		regulator_enable(musb_smps2_supply);

		/* Set Vintcore12 LDO to 1.3V */
		ret = regulator_set_voltage(musb_vintcore_supply,
						1300000, 1350000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set the Vintcore"
					" to 1.3V, ret=%d\n", ret);
		ret = regulator_set_optimum_mode(musb_vintcore_supply,
						 28000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set optimum mode"
					" (ret=%d)\n", ret);
		regulator_enable(musb_vintcore_supply);
		/* Sanity-check that the LDO actually reached 1.3 V. */
		volt = regulator_get_voltage(musb_vintcore_supply);
		if ((volt != 1300000) && (volt != 1350000))
			printk(KERN_ERR "Vintcore is not"
					" set to 1.3V"
					" volt=%d\n", volt);
#ifdef	CONFIG_PM
		stm_musb_context(USB_ENABLE);
#endif

		/* Workaround for USB performance issue. */
		schedule_delayed_work_on(0,
				 &work_usb_workaround,
				 msecs_to_jiffies(USB_PROBE_DELAY));

		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
				DEVICE_NAME, 100);

		abx500_set_register_interruptible(device,
				AB8500_USB,
				AB8500_USB_PHY_CTRL_REG,
				AB8500_USB_DEVICE_ENABLE);
	} else { /* enable == USB_DISABLE */
		/*
		 * Workaround: Sometimes the DISCONNECT interrupt is
		 * not generated in musb_core. Force a disconnect if
		 * necessary before we power down the PHY.
		 */
		stm_musb_force_disconnect();

		if (boot_time_flag)
			boot_time_flag = USB_DISABLE;

		/*
		 * Workaround for bug31952 in ABB cut2.0. Write 0x1
		 * before disabling the PHY.
		 */
		abx500_set_register_interruptible(device, AB8500_USB,
			     AB8500_USB_PHY_CTRL_REG,
			     AB8500_USB_DEVICE_ENABLE);

		udelay(200);

		abx500_set_register_interruptible(device,
			AB8500_USB,
			AB8500_USB_PHY_CTRL_REG,
			AB8500_USB_DEVICE_DISABLE);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
				DEVICE_NAME, 50);

		/* Workaround for USB performance issue. */
		cancel_delayed_work_sync(&work_usb_workaround);
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
					     "usb", 25);

		prcmu_release_usb_wakeup_state();
		phy_enable_stat = USB_DISABLE;
		regulator_disable(musb_vape_supply);
		regulator_disable(musb_smps2_supply);
		regulator_disable(musb_vintcore_supply);
		regulator_set_optimum_mode(musb_vintcore_supply, 0);
		/* Set Vintcore12 LDO to 0V to 1.35V */
		ret = regulator_set_voltage(musb_vintcore_supply,
						0000000, 1350000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set the Vintcore"
					" to 0V to 1.35V,"
					" ret=%d\n", ret);
		clk_disable(sysclock);
#ifdef CONFIG_PM
		stm_musb_context(USB_DISABLE);
#endif
		ux500_pins_disable(usb_gpio_pins);
		wake_unlock(&ab8500_musb_wakelock);
	}
}
/**
 * usb_device_phy_en() - power the USB PHY up or down for gadget mode
 * @enable: USB_ENABLE to power up the PHY, USB_DISABLE to power it down.
 *
 * Variant that notifies cpufreq of USB connect/disconnect instead of
 * scheduling workaround work.  On enable: takes a wakelock, enables
 * clock/regulators, programs the Vintcore LDO to 1.3 V, kicks the USB
 * watchdog, raises the APE OPP QoS to 100% and enables the AB8500 PHY.
 * On disable: disables the PHY (with the ABB cut2.0 bug31952 write-enable-
 * before-disable workaround), drops the QoS requirement and releases all
 * resources in reverse order.  No-op when the PHY is already in the
 * requested state; the sequences are order-dependent — do not reorder.
 */
static void usb_device_phy_en(int enable)
{
	int volt = 0;
	int ret = -1;

	/* Already in the requested state: nothing to do. */
	if (phy_enable_stat == enable)
		return;

	if (enable == USB_ENABLE) {
		wake_lock(&ab8500_musb_wakelock);
		clk_enable(sysclock);
		phy_enable_stat = USB_ENABLE;
		regulator_enable(musb_vape_supply);
		regulator_enable(musb_smps2_supply);

		/* Set Vintcore12 LDO to 1.3V */
		ret = regulator_set_voltage(musb_vintcore_supply,
						1300000, 1350000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set the Vintcore"
					" to 1.3V, ret=%d\n", ret);
		ret = regulator_set_optimum_mode(musb_vintcore_supply,
						 28000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set optimum mode"
					" (ret=%d)\n", ret);
		regulator_enable(musb_vintcore_supply);
		/* Sanity-check that the LDO actually reached 1.3 V. */
		volt = regulator_get_voltage(musb_vintcore_supply);
		if ((volt != 1300000) && (volt != 1350000))
			printk(KERN_ERR "Vintcore is not"
					" set to 1.3V"
					" volt=%d\n", volt);
#ifdef	CONFIG_PM
		stm_musb_context(USB_ENABLE);
#endif
		usb_kick_watchdog();

		/* Workaround for USB performance issue. */
		cpufreq_usb_connect_notify(true);

		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
				DEVICE_NAME, 100);

		abx500_set_register_interruptible(device,
				AB8500_USB,
				AB8500_USB_PHY_CTRL_REG,
				AB8500_USB_DEVICE_ENABLE);
	} else { /* enable == USB_DISABLE */
		if (boot_time_flag)
			boot_time_flag = USB_DISABLE;

		/*
		 * Workaround for bug31952 in ABB cut2.0. Write 0x1
		 * before disabling the PHY.
		 */
		abx500_set_register_interruptible(device, AB8500_USB,
			     AB8500_USB_PHY_CTRL_REG,
			     AB8500_USB_DEVICE_ENABLE);

		udelay(100);

		abx500_set_register_interruptible(device,
			AB8500_USB,
			AB8500_USB_PHY_CTRL_REG,
			AB8500_USB_DEVICE_DISABLE);
		prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP,
				DEVICE_NAME, 50);

		/* Workaround for USB performance issue. */
		cpufreq_usb_connect_notify(false);

		prcmu_release_usb_wakeup_state();
		phy_enable_stat = USB_DISABLE;
		regulator_disable(musb_vape_supply);
		regulator_disable(musb_smps2_supply);
		regulator_disable(musb_vintcore_supply);
		regulator_set_optimum_mode(musb_vintcore_supply, 0);
		/* Set Vintcore12 LDO to 0V to 1.35V */
		ret = regulator_set_voltage(musb_vintcore_supply,
						0000000, 1350000);
		if (ret < 0)
			printk(KERN_ERR "Failed to set the Vintcore"
					" to 0V to 1.35V,"
					" ret=%d\n", ret);
		clk_disable(sysclock);
#ifdef CONFIG_PM
		stm_musb_context(USB_DISABLE);
#endif
		wake_unlock(&ab8500_musb_wakelock);
	}
}