Example #1
0
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

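	/*
	 * Track CPU hotplug transitions: refresh the policy when a CPU comes
	 * online, and tear down or rebuild its stats state around removal.
	 */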
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpufreq_update_policy(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpufreq_stats_free_table(cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		cpufreq_stats_create_table_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}
Example #2
0
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		ret = wait_event_interruptible(s->sync_wq, s->pending ||
					kthread_should_stop());

		if (kthread_should_stop())
			break;

		if (ret == -ERESTARTSYS)
			continue;

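		/* Consume the pending request and note which CPU the task migrated from. */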
		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (dest_policy.cur >= src_policy.cur) {
			pr_debug("No sync. CPU%d@%dKHz >= CPU%d@%dKHz\n",
				 dest_cpu, dest_policy.cur, src_cpu, src_policy.cur);
			continue;
		}

		if (sync_threshold && (dest_policy.cur >= sync_threshold))
			continue;

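		/* Boost the destination's floor to the source CPU's current rate, capped by sync_threshold when one is set. */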
		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
Example #3
0
static void dvfs_core_work_handler(struct work_struct *work)
{
	u32 fsvai;
	u32 reg;
	u32 curr_cpu;
	int ret = 0;
	int maxf = 0, minf = 0;
	int low_freq_bus_ready = 0;
	int bus_incr = 0, cpu_dcr = 0;

	low_freq_bus_ready = low_freq_bus_used();

	/* Check DVFS frequency adjustment interrupt status */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	fsvai = (reg & MXC_DVFSCNTR_FSVAI_MASK) >> MXC_DVFSCNTR_FSVAI_OFFSET;
	/* FSVAI == 0 (FSVAI_FREQ_NOCHANGE) means no frequency change is required */
	if (fsvai == FSVAI_FREQ_NOCHANGE) {
		/* Do nothing. Freq change is not required */
		goto END;
	}
	curr_cpu = clk_get_rate(cpu_clk);

	/* If FSVAI indicates a frequency decrease,
	   check that arm-clk is not already at the lowest frequency, 200 MHz */
	if (fsvai == FSVAI_FREQ_DECREASE) {
		if (curr_cpu == cpu_wp_tbl[cpu_wp_nr - 1].cpu_rate) {
			minf = 1;
			if (low_bus_freq_mode)
				goto END;
		} else {
			/* freq down */
			curr_wp++;
			if (curr_wp >= cpu_wp_nr) {
				curr_wp = cpu_wp_nr - 1;
				goto END;
			}

			if (curr_wp == cpu_wp_nr - 1 && !low_freq_bus_ready) {
				minf = 1;
				dvfs_load_config(1);
			} else {
				cpu_dcr = 1;
			}
		}
	} else {
		if (curr_cpu == cpu_wp_tbl[0].cpu_rate) {
			maxf = 1;
			goto END;
		} else {
			if (!high_bus_freq_mode) {
				/* bump up LP freq first. */
				bus_incr = 1;
				dvfs_load_config(2);
			} else {
				/* freq up */
				curr_wp = 0;
				maxf = 1;
				dvfs_load_config(0);
			}
		}
	}

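	/* Re-check whether the low-frequency bus can be used, then apply the chosen working point and bus mode. */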
	low_freq_bus_ready = low_freq_bus_used();
	if ((curr_wp == cpu_wp_nr - 1) && (!low_bus_freq_mode)
	    && (low_freq_bus_ready) && !bus_incr) {
		if (cpu_dcr)
			ret = set_cpu_freq(curr_wp);
		if (!cpu_dcr) {
			set_low_bus_freq();
			dvfs_load_config(3);
		} else {
			dvfs_load_config(2);
			cpu_dcr = 0;
		}
	} else {
		if (!high_bus_freq_mode)
			set_high_bus_freq(1);

		if (!bus_incr)
			ret = set_cpu_freq(curr_wp);
		bus_incr = 0;
	}


END:	/* Set MAXF, MINF */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	reg = (reg & ~(MXC_DVFSCNTR_MAXF_MASK | MXC_DVFSCNTR_MINF_MASK));
	reg |= maxf << MXC_DVFSCNTR_MAXF_OFFSET;
	reg |= minf << MXC_DVFSCNTR_MINF_OFFSET;

	/* Enable DVFS interrupt */
	/* FSVAIM=0 */
	reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
	reg |= FSVAI_FREQ_NOCHANGE;
	/* LBFL=1 */
	reg = (reg & ~MXC_DVFSCNTR_LBFL);
	reg |= MXC_DVFSCNTR_LBFL;
	__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
	/* Unmask GPC1 IRQ */
	reg = __raw_readl(dvfs_data->gpc_cntr_reg_addr);
	reg &= ~MXC_GPCCNTR_GPCIRQM;
	__raw_writel(reg, dvfs_data->gpc_cntr_reg_addr);

#if defined(CONFIG_CPU_FREQ)
	if (cpufreq_trig_needed == 1) {
		cpufreq_trig_needed = 0;
		cpufreq_update_policy(0);
	}
#endif
}
Example #4
0
static void exynos4_handler_tmu_state(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct s5p_tmu_info *info =
		container_of(delayed_work, struct s5p_tmu_info, polling);
	struct s5p_platform_tmu *data = info->dev->platform_data;
	unsigned int cur_temp;
	static int auto_refresh_changed;
	static int check_handle;
	int trend = 0;
	int cpu = 0;

	mutex_lock(&tmu_lock);

	cur_temp = get_curr_temp(info);
	trend = cur_temp - info->last_temperature;
	pr_debug("curr_temp = %u, temp_diff = %d\n", cur_temp, trend);

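	/*
	 * Walk the TMU state machine: each state uses the current temperature
	 * and its trend to decide on throttling, tripping, or recovery.
	 */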
	switch (info->tmu_state) {
#if defined(CONFIG_TC_VOLTAGE)
	case TMU_STATUS_TC:
		/* lock has higher priority than unlock */
		if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
		} else if (cur_temp >= data->ts.stop_tc) {
			if (exynos_tc_volt(info, 0) < 0) {
				pr_err("TMU: unlock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_NORMAL;
				pr_info("change state: tc -> normal.\n");
			}
		}
		/* free if upper limit is locked */
		if (check_handle) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			check_handle = 0;
		}
		break;
#endif
	case TMU_STATUS_NORMAL:
		/* 1. change state: 1st-throttling */
		if (cur_temp >= data->ts.start_1st_throttle) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: normal->throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0) {
				pr_err("TMU: lock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_TC;
				pr_info("change state: normal->tc.\n");
			}
#endif
		/* 2. polling end and uevent */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (cur_temp <= data->ts.stop_mem_throttle)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("normal: free cpufreq_limit & interrupt enable.\n");

			for_each_online_cpu(cpu)
				cpufreq_update_policy(cpu);

			/* clear pending bits to prevent a spurious interrupt */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			exynos_interrupt_enable(info, 1);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_THROTTLED:
		/* 1. change state: 2nd-throttling or warning */
		if (cur_temp >= data->ts.start_2nd_throttle) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: 1st throttle->2nd throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_1st_throttle) &&
			!(check_handle & THROTTLE_FLAG)) {
			if (check_handle & WARNING_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(WARNING_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_1st_throttle);
			check_handle |= THROTTLE_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("throttling: set cpufreq upper limit.\n");
		/* 3. change state: normal */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_NORMAL;
			pr_info("change state: 1st throttle->normal.\n");
		}
		break;

	case TMU_STATUS_WARNING:
		/* 1. change state: tripping */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			pr_info("change state: 2nd throttle->trip\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_2nd_throttle) &&
			!(check_handle & WARNING_FLAG)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_2nd_throttle);

			check_handle |= WARNING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("2nd throttle: cpufreq is limited.\n");
		/* 3. change state: 1st-throttling */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: 2nd throttle->1st throttle, "
				"and release cpufreq upper limit.\n");
		}
		break;

	case TMU_STATUS_TRIPPED:
		/* 1. call uevent to shut-down */
		if ((cur_temp >= data->ts.start_tripping) &&
			(trend > 0) && !(check_handle & TRIPPING_FLAG)) {
			notify_change_of_tmu_state(info);
			pr_info("tripping: on waiting shutdown.\n");
			check_handle |= TRIPPING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. change state: 2nd-throttling or warning */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
				&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: trip->2nd throttle, "
				"Check! occured only test mode.\n");
		}
		/* 3. chip protection: kernel panic as SW workaround */
		if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
			panic("Emergency!!!! tripping is not treated!\n");
			/* clear pending bits to prevent a spurious interrupt */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_INIT:
		/* send tmu initial status to platform */
		disable_irq(info->irq);
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else if (cur_temp >= data->ts.start_2nd_throttle)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp >= data->ts.start_1st_throttle)
			info->tmu_state = TMU_STATUS_THROTTLED;
		else if (cur_temp <= data->ts.stop_1st_throttle)
			info->tmu_state = TMU_STATUS_NORMAL;

		notify_change_of_tmu_state(info);
		pr_info("%s: inform to init state to platform.\n", __func__);
		break;

	default:
		pr_warn("Bug: checked tmu_state.\n");
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else
			info->tmu_state = TMU_STATUS_WARNING;
		break;
	} /* end */

	info->last_temperature = cur_temp;

	/* reschedule the next work */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	mutex_unlock(&tmu_lock);

	return;
}
Example #5
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	if (!cpuboost_enable)
		return 0;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());
#ifdef CONFIG_IRLED_GPIO
		if (unlikely(gir_boost_disable)) {
			pr_debug("[GPIO_IR][%s] continue~!(cpu:%d)\n", 
				__func__, raw_smp_processor_id());
			continue;
		}
#endif

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (s->task_load < migration_load_threshold)
			continue;

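		/*
		 * Pick the sync frequency: scale the destination's max by the
		 * task load for load-based syncs, otherwise mirror the source
		 * CPU's current frequency.
		 */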
		req_freq = load_based_syncs ?
			(dest_policy.max * s->task_load) / 100 : src_policy.cur;

		if (req_freq <= dest_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Sync Freq:%u\n", req_freq);
			continue;
		}

		if (sync_threshold)
			req_freq = min(sync_threshold, req_freq);

		cancel_delayed_work_sync(&s->boost_rem);

#ifdef CONFIG_CPUFREQ_HARDLIMIT
		s->boost_min = check_cpufreq_hardlimit(req_freq);
#else
		s->boost_min = req_freq;
#endif

		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(src_cpu))
			/*
			 * Send an unchanged policy update to the source
			 * CPU. Even though the policy isn't changed from
			 * its existing boosted or non-boosted state
			 * notifying the source CPU will let the governor
			 * know a boost happened on another CPU and that it
			 * should re-evaluate the frequency at the next timer
			 * event without interference from a min sample time.
			 */
			cpufreq_update_policy(src_cpu);
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
Example #6
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(s->sync_wq,
					s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

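		/* Nothing to propagate if the source CPU is already at its minimum frequency. */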
		if (src_policy.cur == src_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Source CPU%d@%dKHz at min freq\n",
				 src_cpu, src_policy.cur);
			continue;
		}

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(src_cpu))
			/*
			 * Send an unchanged policy update to the source
			 * CPU. Even though the policy isn't changed from
			 * its existing boosted or non-boosted state
			 * notifying the source CPU will let the governor
			 * know a boost happened on another CPU and that it
			 * should re-evaluate the frequency at the next timer
			 * event without interference from a min sample time.
			 */
			cpufreq_update_policy(src_cpu);
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
Example #7
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int)data;
	int src_cpu, ret;
	struct boost_policy *b = &per_cpu(boost_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	while (1) {
		wait_event_interruptible(b->sync_wq, b->pending ||
					kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&b->lock, flags);
		b->pending = false;
		src_cpu = b->src_cpu;
		spin_unlock_irqrestore(&b->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

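		/* Request the larger of the load-scaled destination max and the source CPU's current frequency. */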
		req_freq = max((dest_policy.max * b->task_load) / 100,
							src_policy.cur);

		if (req_freq <= dest_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Sync Freq:%u\n", req_freq);
			continue;
		}

		if (sync_threshold)
			req_freq = min(sync_threshold, req_freq);

		cancel_delayed_work_sync(&b->mig_boost_rem);

		b->migration_freq = req_freq;

		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(src_cpu))
			/*
			 * Send an unchanged policy update to the source
			 * CPU. Even though the policy isn't changed from
			 * its existing boosted or non-boosted state
			 * notifying the source CPU will let the governor
			 * know a boost happened on another CPU and that it
			 * should re-evaluate the frequency at the next timer
			 * event without interference from a min sample time.
			 */
			cpufreq_update_policy(src_cpu);
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, boost_wq,
				&b->mig_boost_rem, msecs_to_jiffies(migration_boost_ms));
		} else
			b->migration_freq = 0;
		put_online_cpus();
	}

	return 0;
}
Example #8
0
/*!
******************************************************************************

 @Function	SysDeinitialise

 @Description De-initialises kernel services at 'driver unload' time

 @Return   PVRSRV_ERROR  :

******************************************************************************/
PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
{
    SYS_SPECIFIC_DATA * psSysSpecData;
    PVRSRV_ERROR eError;

    if (psSysData == IMG_NULL) {
        PVR_DPF((PVR_DBG_ERROR, "SysDeinitialise: Called with NULL SYS_DATA pointer.  Probably called before."));
        return PVRSRV_OK;
    }

    psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;

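    /* With active power management support, stop listening for cpufreq policy
       changes and force a policy re-evaluation so any limit imposed by this
       driver is dropped. */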
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
    /* TODO: regulator and clk put. */
    cpufreq_unregister_notifier(&cpufreq_limit_notifier,
                                CPUFREQ_POLICY_NOTIFIER);
    cpufreq_update_policy(current_thread_info()->cpu);
#endif

#if defined(SYS_USING_INTERRUPTS)
    if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LISR)
    {
        eError = OSUninstallSystemLISR(psSysData);
        if (eError != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallSystemLISR failed"));
            return eError;
        }
    }
#endif

    if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_MISR)
    {
        eError = OSUninstallMISR(psSysData);
        if (eError != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
            return eError;
        }
    }

    /* de-initialise all services managed devices */
    eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
        return eError;
    }

    eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
        return eError;
    }

    SysDeinitialiseCommon(gpsSysData);

    gpsSysData = IMG_NULL;

    return PVRSRV_OK;
}
Example #9
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int) data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

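		/* Skip the sync when the source CPU sits at its policy minimum and that minimum is no higher than the destination's. */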
		if (src_policy.min == src_policy.cur &&
				src_policy.min <= dest_policy.min) {
			continue;
		}

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}
		/* Force policy re-evaluation to trigger adjust notifier. */
		cpufreq_update_policy(dest_cpu);
		/* Notify source CPU of policy change */
		cpufreq_update_policy(src_cpu);
#if defined(CONFIG_ARCH_MSM8974) || defined(CONFIG_ARCH_MSM8974PRO)
		get_online_cpus();
		if (cpu_online(dest_cpu))
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		put_online_cpus();
#else
		queue_delayed_work_on(s->cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
#endif
	}

	return 0;
}
Example #10
0
static void run_boost_migration(unsigned int cpu)
{
	int dest_cpu = cpu;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	spin_lock_irqsave(&s->lock, flags);
	s->pending = false;
	src_cpu = s->src_cpu;
	spin_unlock_irqrestore(&s->lock, flags);

	ret = cpufreq_get_policy(&src_policy, src_cpu);
	if (ret)
		return;

	ret = cpufreq_get_policy(&dest_policy, dest_cpu);
	if (ret)
		return;

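	/*
	 * Choose the boost target: load-scaled destination max for load-based
	 * syncs, otherwise the source CPU's current frequency.
	 */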
	req_freq = load_based_syncs ?
		(dest_policy.max * s->task_load) / 100 :
						src_policy.cur;

	if (req_freq <= dest_policy.cpuinfo.min_freq) {
			pr_debug("No sync. Sync Freq:%u\n", req_freq);
		return;
	}

	if (sync_threshold)
		req_freq = min(sync_threshold, req_freq);

	cancel_delayed_work_sync(&s->boost_rem);

	s->boost_min = req_freq;

	/* Force policy re-evaluation to trigger adjust notifier. */
	get_online_cpus();
	if (cpu_online(src_cpu))
		/*
		 * Send an unchanged policy update to the source
		 * CPU. Even though the policy isn't changed from
		 * its existing boosted or non-boosted state
		 * notifying the source CPU will let the governor
		 * know a boost happened on another CPU and that it
		 * should re-evaluate the frequency at the next timer
		 * event without interference from a min sample time.
		 */
		cpufreq_update_policy(src_cpu);
	if (cpu_online(dest_cpu)) {
		cpufreq_update_policy(dest_cpu);
		queue_delayed_work_on(dest_cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
	} else {
		s->boost_min = 0;
	}
	put_online_cpus();
}
Example #11
0
static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	ssize_t ret = -EINVAL;
	int cpu;

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

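	/* A value of -1 releases the max-frequency lock; any other value requests a new lock at that frequency. */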
	if (val == -1) { /* Unlock request */
		if (cpufreq_max_limit_val != -1) {
			/* Reset lock value to default */
 			cpufreq_max_limit_val = -1;

			/* Update CPU frequency policy */
			for_each_online_cpu(cpu)
				cpufreq_update_policy(cpu);

			/* Update PRCMU QOS value to min value */
			if (min_replacement && cpufreq_min_limit_val != -1) {
				prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
						"power", cpufreq_min_limit_val);
				/* Clear replacement flag */
				min_replacement = 0;
			}
		} else /* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
	} else { /* Lock request */
		if (get_cpufreq_level((unsigned int)val, &cpufreq_level, DVFS_MAX_LOCK_REQ)
		    == VALID_LEVEL) {
 			cpufreq_max_limit_val = val;

			/* Max lock has higher priority than Min lock */
			if (cpufreq_min_limit_val != -1 &&
			    cpufreq_min_limit_val > cpufreq_max_limit_val) {
				printk(KERN_ERR "%s: Min lock forced to %d"
					" because of Max lock\n",
					__func__, cpufreq_max_limit_val);
				/* Update PRCMU QOS value to max value */
				prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
						"power", cpufreq_max_limit_val);
				/* Set replacement flag */
				min_replacement = 1;
			}

			/* Update CPU frequency policy */
			for_each_online_cpu(cpu)
				cpufreq_update_policy(cpu);
		} else /* Invalid lock request --> No action */
			printk(KERN_ERR "%s: Lock request is invalid\n",
				__func__);
	}

	ret = n;
out:
	return ret;
}