static int update_core_config(unsigned int cpunumber, bool up)
{
	int ret = -EINVAL;
	unsigned int nr_cpus = num_online_cpus();
	int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

	if (cpq_state == TEGRA_CPQ_DISABLED || cpunumber >= nr_cpu_ids)
		return ret;

	if (up) {
		if (is_lp_cluster()) {
			cpumask_set_cpu(cpunumber, &cr_online_requests);
			ret = -EBUSY;
		} else {
			if (tegra_cpu_edp_favor_up(nr_cpus, mp_overhead) &&
			    nr_cpus < max_cpus)
				ret = cpu_up(cpunumber);
		}
	} else {
		if (is_lp_cluster()) {
			ret = -EBUSY;
		} else {
			if (nr_cpus > min_cpus)
				ret = cpu_down(cpunumber);
		}
	}

	return ret;
}
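
A minimal usage sketch, assuming a hypothetical governor callback (governor_wake_cpu is not part of the source): on the up path, -EBUSY means the request was parked in cr_online_requests and is applied once the G cluster is active again (see apply_core_config() in Example 17).

static void governor_wake_cpu(unsigned int cpunumber)
{
	int err = update_core_config(cpunumber, true);

	/* -EBUSY: the LP cluster is active; the request was queued in
	 * cr_online_requests and apply_core_config() will wake the core
	 * after the switch back to the G cluster. */
	if (err == -EBUSY)
		pr_debug("cpu%u online request deferred\n", cpunumber);
}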
Example 2
static int update_core_config(unsigned int cpunumber, bool up)
{
	int ret = -EINVAL;
	unsigned int nr_cpus = num_online_cpus();
	int max_cpus = tegra_cpq_max_cpus();
	int min_cpus = tegra_cpq_min_cpus();

#if CPUQUIET_DEBUG_VERBOSE
	pr_info(CPUQUIET_TAG "%s\n", __func__);
#endif

	if (cpq_state == TEGRA_CPQ_DISABLED || cpunumber >= nr_cpu_ids)
		return ret;

	/* Sync with tegra_cpuquiet_work_func(): if we are currently
	 * switching to LP and an up request comes in, we could end up with
	 * more than one core up while the governor is stopped and the
	 * cluster is not in LP mode. */
	if (!mutex_trylock(&hotplug_lock)) {
#if CPUQUIET_DEBUG_VERBOSE
		pr_info(CPUQUIET_TAG "%s failed to get hotplug_lock\n", __func__);
#endif
		return -EBUSY;
	}

	if (up) {
		if (is_lp_cluster()) {
			ret = -EBUSY;
		} else {
			if (nr_cpus < max_cpus) {
				show_status("UP", 0, cpunumber);
				ret = cpu_up(cpunumber);
			}
		}
	} else {
		if (is_lp_cluster()) {
			ret = -EBUSY;
		} else {
			if (nr_cpus > 1 && nr_cpus > min_cpus) {
				show_status("DOWN", 0, cpunumber);
				ret = cpu_down(cpunumber);
			}
		}
	}

	mutex_unlock(&hotplug_lock);

	return ret;
}
Example 3
static void min_cpus_change(void)
{
	bool g_cluster = false;
	cputime64_t on_time = 0;

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	mutex_lock(tegra3_cpu_lock);

	if ((tegra_cpq_min_cpus() >= 2) && is_lp_cluster()) {
		if (switch_clk_to_gmode()){
			pr_err(CPUQUIET_TAG "min_cpus_change - switch_clk_to_gmode failed\n");
			mutex_unlock(tegra3_cpu_lock);
			return;
		}

		on_time = ktime_to_ms(ktime_get()) - lp_on_time;
		show_status("LP -> off - min_cpus_change", on_time, -1);

		g_cluster = true;
	}

	tegra_cpu_set_speed_cap(NULL);
	mutex_unlock(tegra3_cpu_lock);

	schedule_work(&minmax_work);

	if (g_cluster && !manual_hotplug)
		cpuquiet_device_free();
}
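
A minimal wiring sketch, assuming min_cpus_change() is meant to run whenever the PM_QOS_MIN_ONLINE_CPUS constraint changes; the notifier below is hypothetical (compare the real min_cpus_notify() callbacks elsewhere in this section).

/* Hypothetical glue: re-evaluate the cluster and core count when the
 * minimum-online-CPUs constraint moves. */
static int min_cpus_qos_notify(struct notifier_block *nb,
			       unsigned long n, void *p)
{
	min_cpus_change();
	return NOTIFY_OK;
}

static struct notifier_block min_cpus_qos_nb = {
	.notifier_call = min_cpus_qos_notify,
};

/* at init, assumed registration:
 * pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_qos_nb);
 */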
Example 4
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
	if (!is_g_cluster_present())
		return;

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	cpq_state = TEGRA_CPQ_IDLE;
	is_suspended = suspend;

	if (suspend) {
		return;
	}

	if (is_lp_cluster() &&
	    (cpu_freq > idle_top_freq || no_lp)) {
		cpq_state = TEGRA_CPQ_SWITCH_TO_G;
		queue_delayed_work(cpuquiet_wq, &cpuquiet_work,
				   msecs_to_jiffies(lp_up_delay));
	} else if (cpu_freq <= idle_top_freq && lp_possible()) {
		cpq_state = TEGRA_CPQ_SWITCH_TO_LP;
		if (queue_delayed_work(cpuquiet_wq, &cpuquiet_work,
				       msecs_to_jiffies(lp_down_delay))) {
#if CPUQUIET_DEBUG_VERBOSE
			pr_info(CPUQUIET_TAG "queued TEGRA_CPQ_SWITCH_TO_LP\n");
#endif
		}
	}
}
Example 5
void tegra_init_cache(void)
{
#ifdef CONFIG_CACHE_L2X0
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	u32 aux_ctrl;

#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
	writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);

#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	/* PL310 RAM latency is CPU dependent. NOTE: Changes here
	   must also be reflected in __cortex_a9_l2x0_restart */

	if (is_lp_cluster()) {
		writel(0x221, p + L2X0_TAG_LATENCY_CTRL);
		writel(0x221, p + L2X0_DATA_LATENCY_CTRL);
	} else {
		writel(0x441, p + L2X0_TAG_LATENCY_CTRL);
		writel(0x551, p + L2X0_DATA_LATENCY_CTRL);
	}
#else
	writel(0x770, p + L2X0_TAG_LATENCY_CTRL);
	writel(0x770, p + L2X0_DATA_LATENCY_CTRL);
#endif
#endif
	/* Build AUX_CTRL: the field at bits 10:8 of the cache type register
	 * is shifted up to the way-size position (bits 19:17), then the
	 * fixed feature bits 0x7C000001 are OR'ed in. */
	aux_ctrl = readl(p + L2X0_CACHE_TYPE);
	aux_ctrl = (aux_ctrl & 0x700) << (17-8);
	aux_ctrl |= 0x7C000001;
	l2x0_init(p, aux_ctrl, 0x8200c3fe);
#endif
}
Example 6
static bool is_cpu_powered(unsigned int cpu)
{
	if (is_lp_cluster())
		return true;
	else
		return tegra_powergate_is_powered(TEGRA_CPU_POWERGATE_ID(cpu));
}
static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
{
	bool g_cluster = false;

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return NOTIFY_OK;

	mutex_lock(tegra3_cpu_lock);

	if ((n >= 1) && is_lp_cluster()) {
		/* make sure cpu rate is within g-mode
		 * range before switching */
		unsigned long speed = max((unsigned long)tegra_getspeed(0),
					clk_get_min_rate(cpu_g_clk) / 1000);
		tegra_update_cpu_speed(speed);

		clk_set_parent(cpu_clk, cpu_g_clk);
		g_cluster = true;
	}

	tegra_cpu_set_speed_cap(NULL);
	mutex_unlock(tegra3_cpu_lock);

	schedule_work(&minmax_work);

	if (g_cluster)
		cpuquiet_device_free();

	return NOTIFY_OK;
}
static int __init sysfscluster_init(void)
{
	int e;

	TRACE_CLUSTER(("+sysfscluster_init\n"));

	spin_lock_init(&cluster_lock);
	cluster_kobj = kobject_create_and_add("cluster", kernel_kobj);

	/* CREATE_FILE() is assumed to set "e" and jump to the fail label on
	 * error; its definition is outside this snippet. */
	CREATE_FILE(active);
	CREATE_FILE(immediate);
	CREATE_FILE(force);
	CREATE_FILE(wake_ms);
#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
	CREATE_FILE(powermode);
#endif
#ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE
	CREATE_FILE(powergate);
#endif
#if DEBUG_CLUSTER_SWITCH
	CREATE_FILE(debug);
#endif

	spin_lock(&cluster_lock);
	if (is_lp_cluster())
		flags |= TEGRA_POWER_CLUSTER_LP;
	else
		flags |= TEGRA_POWER_CLUSTER_G;
	spin_unlock(&cluster_lock);

fail:
	TRACE_CLUSTER(("-sysfscluster_init\n"));
	return e;
}
Example 9
void tegra_init_cache(bool init)
{
#ifdef CONFIG_TRUSTED_FOUNDATIONS
	/* enable/re-enable of L2 handled by secureos */
	return tegra_init_cache_tz(init);
#else
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	u32 aux_ctrl;

#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
	writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);

#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	/* PL310 RAM latency is CPU dependent. NOTE: Changes here
	   must also be reflected in __cortex_a9_l2x0_restart */

	if (is_lp_cluster()) {
		writel(0x221, p + L2X0_TAG_LATENCY_CTRL);
		writel(0x221, p + L2X0_DATA_LATENCY_CTRL);
	} else {
		u32 speedo;

		/* relax l2-cache latency for speedos 4,5,6 (T33's chips) */
		speedo = tegra_cpu_speedo_id();
		if (speedo == 4 || speedo == 5 || speedo == 6 ||
		    speedo == 12 || speedo == 13) {
			writel(0x442, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x552, p + L2X0_DATA_LATENCY_CTRL);
		} else {
			writel(0x441, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x551, p + L2X0_DATA_LATENCY_CTRL);
		}
	}
#else
	writel(0x770, p + L2X0_TAG_LATENCY_CTRL);
	writel(0x770, p + L2X0_DATA_LATENCY_CTRL);
#endif
#endif
	writel(0x3, p + L2X0_POWER_CTRL);
	aux_ctrl = readl(p + L2X0_CACHE_TYPE);
	aux_ctrl = (aux_ctrl & 0x700) << (17-8);
	aux_ctrl |= 0x7C000001;
	if (init) {
		l2x0_init(p, aux_ctrl, 0x8200c3fe);
		/* use our outer_disable() routine to avoid flush */
		outer_cache.disable = tegra_l2x0_disable;
	} else {
		u32 tmp;

		tmp = aux_ctrl;
		aux_ctrl = readl(p + L2X0_AUX_CTRL);
		aux_ctrl &= 0x8200c3fe;
		aux_ctrl |= tmp;
		writel(aux_ctrl, p + L2X0_AUX_CTRL);
	}
	l2x0_enable();
#endif
}
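
An assumed call pattern for the init flag (the call sites are outside this snippet): true performs the one-time l2x0_init() and hooks outer_cache.disable, while false only restores AUX_CTRL before re-enabling, e.g. after a cluster switch or deep sleep. The wrapper functions below are hypothetical, for illustration only.

static void __init example_boot_path(void)
{
	tegra_init_cache(true);		/* one-time init + outer-cache hook */
}

static void example_resume_path(void)
{
	tegra_init_cache(false);	/* restore AUX_CTRL, then l2x0_enable() */
}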
Example 10
int tegra_cpuquiet_force_gmode(void)
{
	cputime64_t on_time = 0;

	if (no_lp)
		return -EBUSY;

	if (!is_g_cluster_present())
		return -EBUSY;

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return -EBUSY;

	if (is_lp_cluster()) {
		mutex_lock(tegra3_cpu_lock);

		if (switch_clk_to_gmode()) {
			pr_err(CPUQUIET_TAG "tegra_cpuquiet_force_gmode - switch_clk_to_gmode failed\n");
			mutex_unlock(tegra3_cpu_lock);
			return -EBUSY;
		}

		on_time = ktime_to_ms(ktime_get()) - lp_on_time;
		show_status("LP -> off - force", on_time, -1);

		mutex_unlock(tegra3_cpu_lock);

		if (!manual_hotplug)
			cpuquiet_device_free();
	}

	return 0;
}
Example 11
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int status;

	/* Avoid timer calibration on slave cpus. Use the value calibrated
	 * on master cpu. This reduces the bringup time for each slave cpu
	 * by around 260ms.
	 */
	preset_lpj = loops_per_jiffy;
	if (is_lp_cluster()) {
		struct clk *cpu_clk, *cpu_g_clk;

		/* The G CPU may not be available for a variety of reasons. */
		status = is_g_cluster_available(cpu);
		if (status)
			goto done;

		cpu_clk = tegra_get_clock_by_name("cpu");
		cpu_g_clk = tegra_get_clock_by_name("cpu_g");

		/* Switch to G CPU before continuing. */
		if (!cpu_clk || !cpu_g_clk) {
			/* Early boot, clock infrastructure is not initialized
			   - CPU mode switch is not allowed */
			status = -EINVAL;
		} else
			status = clk_set_parent(cpu_clk, cpu_g_clk);

		if (status)
			goto done;
	}

	smp_wmb();

	/* Force the CPU into reset. The CPU must remain in reset when the
	   flow controller state is cleared (which will cause the flow
	   controller to stop driving reset if the CPU has been power-gated
	   via the flow controller). This will have no effect on first boot
	   of the CPU since it should already be in reset. */
	writel(CPU_RESET(cpu), CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
	dmb();

	/* Unhalt the CPU. If the flow controller was used to power-gate the
	   CPU this will cause the flow controller to stop driving reset.
	   The CPU will remain in reset because the clock and reset block
	   is now driving reset. */
	flowctrl_writel(0, FLOW_CTRL_HALT_CPU(cpu));

	status = power_up_cpu(cpu);
	if (status)
		goto done;

	/* Take the CPU out of reset. */
	writel(CPU_RESET(cpu), CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
	wmb();
done:
	return status;
}
Example 12
static void tegra_cpuquiet_work_func(struct work_struct *work)
{
	int device_busy = -1;	/* -1: no switch, 0: switched to G, 1: switched to LP */

	mutex_lock(tegra3_cpu_lock);

	switch (cpq_state) {
		case TEGRA_CPQ_DISABLED:
		case TEGRA_CPQ_IDLE:
			break;
		case TEGRA_CPQ_SWITCH_TO_G:
			if (is_lp_cluster()) {
				if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
					/* catch-up with governor target speed */
					tegra_cpu_set_speed_cap(NULL);
					/* process pending core requests */
					device_busy = 0;
				}
			}
			break;
		case TEGRA_CPQ_SWITCH_TO_LP:
			if (!is_lp_cluster() && !no_lp &&
				!pm_qos_request(PM_QOS_MIN_ONLINE_CPUS)
				&& num_online_cpus() == 1) {
				if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
					/* catch-up with governor target speed */
					tegra_cpu_set_speed_cap(NULL);
					device_busy = 1;
				}
			}
			break;
		default:
			pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, cpq_state);
	}

	mutex_unlock(tegra3_cpu_lock);

	if (device_busy == 1) {
		cpuquiet_device_busy();
	} else if (!device_busy) {
		apply_core_config();
		cpuquiet_device_free();
	}
}
Example 13
void tegra_gic_affinity_to_cpu0(void)
{
	unsigned int i;

	BUG_ON(is_lp_cluster());

	/* The GIC distributor TARGET register is one byte per IRQ;
	 * 0x01010101 routes four interrupts at a time to CPU0. */
	for (i = 32; i < INT_GIC_NR; i += 4)
		__raw_writel(0x01010101, gic_dist_base + GIC_DIST_TARGET + i);
	wmb();
}
static void tegra_auto_cpuplug_work_func(struct work_struct *work)
{
	bool up = false;
	unsigned int cpu = nr_cpu_ids;

	mutex_lock(tegra3_cpu_lock);
	if (hp_state != TEGRA_HP_DISABLED) {
		switch (last_state) {
		case TEGRA_HP_UP:
			cpu = cpumask_next_zero(0, cpu_online_mask);
			if (cpu < nr_cpu_ids) {
				up = true;
				hp_stats_update(cpu, true);
			}
			break;
		case TEGRA_HP_DOWN:
			cpu = tegra_get_slowest_cpu_n();
			if (cpu < nr_cpu_ids) {
				up = false;
				hp_stats_update(cpu, false);
			} else if (!is_lp_cluster() && !no_lp) {
				if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
					CPU_DEBUG_PRINTK(CPU_DEBUG_HOTPLUG, " ENTER LPCPU");
					hp_stats_update(CONFIG_NR_CPUS, true);
					hp_stats_update(0, false);
					/* catch-up with governor target speed */
					tegra_cpu_set_speed_cap(NULL);
				} else
					pr_err(CPU_HOTPLUG_TAG" clk_set_parent fail\n");
			}
			break;
		}
	}
	mutex_unlock(tegra3_cpu_lock);

	if (system_state > SYSTEM_RUNNING) {
		pr_info(CPU_HOTPLUG_TAG" SYSTEM is not running\n");
	} else if (cpu < nr_cpu_ids) {
		if (up) {
			updateCurrentCPUTotalActiveTime();
			cpu_up(cpu);
			pr_info(CPU_HOTPLUG_TAG" TURN ON CPU %d, online CPU 0-3=[%d%d%d%d]\n",
					cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
		} else {
			updateCurrentCPUTotalActiveTime();
			cpu_down(cpu);
			pr_info(CPU_HOTPLUG_TAG" TURN OFF CPU %d, online CPU 0-3=[%d%d%d%d]\n",
					cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
		}
	}

	mutex_lock(tegra3_cpu_lock);
	is_plugging = false;
	mutex_unlock(tegra3_cpu_lock);
}
static ssize_t sysfscluster_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ClusterAttr type;
	ssize_t len;

	TRACE_CLUSTER(("+sysfscluster_show\n"));

	type = GetClusterAttr(attr->attr.name);
	switch (type) {
	case ClusterAttr_Active:
		len = sprintf(buf, "%s\n", is_lp_cluster() ? "LP" : "G");
		break;

	case ClusterAttr_Immediate:
		len = sprintf(buf, "%d\n",
			      ((flags & TEGRA_POWER_CLUSTER_IMMEDIATE) != 0));
		break;

	case ClusterAttr_Force:
		len = sprintf(buf, "%d\n",
			      ((flags & TEGRA_POWER_CLUSTER_FORCE) != 0));
		break;

	case ClusterAttr_WakeMs:
		len = sprintf(buf, "%d\n", wake_ms);
		break;

#if defined(CONFIG_PM_SLEEP) && SYSFS_CLUSTER_POWER_MODE
	case ClusterAttr_PowerMode:
		len = sprintf(buf, "%d\n", power_mode);
		break;
#endif

#ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE
	case ClusterAttr_PowerGate:
		len = sprintf(buf, "%s\n", decode_power_gate(power_gate));
		break;
#endif

#if DEBUG_CLUSTER_SWITCH
	case ClusterAttr_Debug:
		len = sprintf(buf, "%d\n", tegra_cluster_debug);
		break;
#endif

	default:
		len = sprintf(buf, "invalid\n");
		break;
	}

	TRACE_CLUSTER(("-sysfscluster_show\n"));
	return len;
}
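
Given the kobject created with kobject_create_and_add("cluster", kernel_kobj) in sysfscluster_init() above, these attributes surface under /sys/kernel/cluster/; reading the active file, for instance, yields "LP" or "G" according to is_lp_cluster().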
Example 16
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
	if (!is_g_cluster_present())
		return;

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	if (suspend) {
		cpq_state = TEGRA_CPQ_IDLE;

		/* Switch to G-mode if suspend rate is high enough */
		if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
			clk_set_parent(cpu_clk, cpu_g_clk);
			cpuquiet_device_free();
		}
		return;
	}

	if (is_lp_cluster() && pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
		if (cpq_state != TEGRA_CPQ_SWITCH_TO_G) {
			/* Force switch */
			cpq_state = TEGRA_CPQ_SWITCH_TO_G;
			queue_delayed_work(
				cpuquiet_wq, &cpuquiet_work, up_delay);
		}
		return;
	}

	if (is_lp_cluster() && (cpu_freq >= idle_top_freq || no_lp)) {
		cpq_state = TEGRA_CPQ_SWITCH_TO_G;
		queue_delayed_work(cpuquiet_wq, &cpuquiet_work, up_delay);
	} else if (!is_lp_cluster() && !no_lp &&
		   cpu_freq <= idle_bottom_freq) {
		cpq_state = TEGRA_CPQ_SWITCH_TO_LP;
		queue_delayed_work(cpuquiet_wq, &cpuquiet_work, down_delay);
	} else {
		cpq_state = TEGRA_CPQ_IDLE;
	}
}
Example 17
static void apply_core_config(void)
{
	unsigned int cpu;

	if (is_lp_cluster() || cpq_state == TEGRA_CPQ_DISABLED)
		return;

	/* Wake every CPU that was requested online while the LP cluster was
	 * active; update_core_config() parks such requests in
	 * cr_online_requests. */
	for_each_cpu_mask(cpu, cr_online_requests) {
		if (cpu < nr_cpu_ids && !cpu_online(cpu))
			if (!tegra_wake_cpu(cpu))
				cpumask_clear_cpu(cpu, &cr_online_requests);
	}
}
Example 18
static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
{
	mutex_lock(tegra3_cpu_lock);

	if ((n >= 2) && is_lp_cluster()) {
		if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
			hp_stats_update(CONFIG_NR_CPUS, false);
			hp_stats_update(0, true);
		}
	}
	/* update governor state machine */
	tegra_cpu_set_speed_cap(NULL);
	mutex_unlock(tegra3_cpu_lock);
	return NOTIFY_OK;
}
Example 19
void tegra_gic_disable_affinity(void)
{
	unsigned int i;

	BUG_ON(is_lp_cluster());

	/* The GIC distributor TARGET register is one byte per IRQ. */
	for (i = 32; i < INT_GIC_NR; i += 4) {
		/* Save the affinity. */
		gic_affinity[i/4] = __raw_readl(gic_dist_base +
						GIC_DIST_TARGET + i);

		/* Force this interrupt to CPU0. */
		__raw_writel(0x01010101, gic_dist_base + GIC_DIST_TARGET + i);
	}

	wmb();
}
Example 20
static void tegra_check_limited_max_cores(void)
{
	if (is_lp_cluster()) {
		if (cpufreq_limited_max_cores_cur !=
		    cpufreq_limited_max_cores_expected) {
			switch (cpufreq_limited_max_cores_expected) {
			case 1:
				set_cpu_present(1, false);
				set_cpu_present(2, false);
				set_cpu_present(3, false);
				set_cpu_possible(1, false);
				set_cpu_possible(2, false);
				set_cpu_possible(3, false);
				cpufreq_limited_max_cores_cur =
					cpufreq_limited_max_cores_expected;
				break;
			case 2:
				set_cpu_present(1, true);
				set_cpu_present(2, false);
				set_cpu_present(3, false);
				set_cpu_possible(1, true);
				set_cpu_possible(2, false);
				set_cpu_possible(3, false);
				cpufreq_limited_max_cores_cur =
					cpufreq_limited_max_cores_expected;
				break;
			case 4:
				set_cpu_present(1, true);
				set_cpu_present(2, true);
				set_cpu_present(3, true);
				set_cpu_possible(1, true);
				set_cpu_possible(2, true);
				set_cpu_possible(3, true);
				cpufreq_limited_max_cores_cur =
					cpufreq_limited_max_cores_expected;
				break;
			default:
				cpufreq_limited_max_cores_expected =
					cpufreq_limited_max_cores_cur;
				break;
			}
		}
	}
}
Example 21
static void cpu_core_state_workfunc(struct work_struct *work)
{
	int i = 0;
	int cpu = 0;

	for (i = 0; i < 3; i++) {
		cpu = i + 1;
		if (cpu_core_state[i] == 0 && cpu_online(cpu)) {
			show_status("DOWN", 0, cpu);
			cpu_down(cpu);
		} else if (cpu_core_state[i] == 1 && !cpu_online(cpu)) {
			if (is_lp_cluster())
				tegra_cpuquiet_force_gmode();

			show_status("UP", 0, cpu);
			cpu_up(cpu);
		}
	}
}
Example 22
static void min_max_constraints_workfunc(struct work_struct *work)
{
	int count = -1;
	bool up = false;
	unsigned int cpu;

	int nr_cpus = num_online_cpus();
	int max_cpus = tegra_cpq_max_cpus();
	int min_cpus = tegra_cpq_min_cpus();

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	if (is_lp_cluster())
		return;

	if (nr_cpus < min_cpus) {
		up = true;
		count = min_cpus - nr_cpus;
	} else if (nr_cpus > max_cpus && max_cpus >= min_cpus) {
		count = nr_cpus - max_cpus;
	}

	for (; count > 0; count--) {
		if (up) {
			cpu = best_core_to_turn_up();
			if (cpu < nr_cpu_ids) {
				show_status("UP", 0, cpu);
				cpu_up(cpu);
			} else {
				break;
			}
		} else {
			cpu = cpumask_next(0, cpu_online_mask);
			if (cpu < nr_cpu_ids) {
				show_status("DOWN", 0, cpu);
				cpu_down(cpu);
			} else {
				break;
			}
		}
	}
}
Example 23
void tegra_gic_restore_affinity(void)
{
	unsigned int i;

	BUG_ON(is_lp_cluster());

	/* The GIC distributor TARGET register is one byte per IRQ. */
	for (i = 32; i < INT_GIC_NR; i += 4) {
#ifdef CONFIG_BUG
		u32 reg = __raw_readl(gic_dist_base + GIC_DIST_TARGET + i);
		if (reg & 0xFEFEFEFE)
			panic("GIC affinity changed!");
#endif
		/* Restore this interrupt's affinity. */
		__raw_writel(gic_affinity[i/4], gic_dist_base +
			     GIC_DIST_TARGET + i);
	}

	wmb();
}
static void hp_init_stats(void)
{
    int i;
    u64 cur_jiffies = get_jiffies_64();

    for (i = 0; i <= CONFIG_NR_CPUS; i++) {
        hp_stats[i].time_up_total = 0;
        hp_stats[i].last_update = cur_jiffies;

        hp_stats[i].up_down_count = 0;
        if (is_lp_cluster()) {
            if (i == CONFIG_NR_CPUS)
                hp_stats[i].up_down_count = 1;
        } else {
            if ((i < nr_cpu_ids) && cpu_online(i))
                hp_stats[i].up_down_count = 1;
        }
    }
}
static int min_cpus_notify(struct notifier_block *nb, unsigned long n, void *p)
{
    mutex_lock(tegra3_cpu_lock);

    if ((n >= 1) && is_lp_cluster()) {
        /* make sure cpu rate is within g-mode range before switching */
        unsigned long speed = max((unsigned long)tegra_getspeed(0),
                                  clk_get_min_rate(cpu_g_clk) / 1000);
        tegra_update_cpu_speed(speed);

        if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
            hp_stats_update(CONFIG_NR_CPUS, false);
            hp_stats_update(0, true);
        }
    }
    /* update governor state machine */
    tegra_cpu_set_speed_cap(NULL);
    mutex_unlock(tegra3_cpu_lock);
    return NOTIFY_OK;
}
Example 26
static void min_max_constraints_workfunc(struct work_struct *work)
{
	int count = -1;
	bool up = false;
	unsigned int cpu;

	int nr_cpus = num_online_cpus();
	int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	if (is_lp_cluster())
		return;

	if (nr_cpus < min_cpus) {
		up = true;
		count = min_cpus - nr_cpus;
	} else if (nr_cpus > max_cpus && max_cpus >= min_cpus) {
		count = nr_cpus - max_cpus;
	}

	for (; count > 0; count--) {
		if (up) {
			cpu = cpumask_next_zero(0, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpu_up(cpu);
			else
				break;
		} else {
			cpu = cpumask_next(0, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpu_down(cpu);
			else
				break;
		}
	}
}
Example 27
static inline void show_status(const char *extra, cputime64_t on_time, int cpu)
{
	if (on_time)
		hotplug_info("%s Mask=[%d.%d%d%d%d]|lp_on_time=%llu\n",
			     extra, is_lp_cluster(),
			     (is_lp_cluster() == 1) ? 0 : cpu_online(0),
			     cpu_online(1), cpu_online(2), cpu_online(3),
			     on_time);
	else if (cpu > 0)
		hotplug_info("%s %d Mask=[%d.%d%d%d%d]\n",
			     extra, cpu, is_lp_cluster(),
			     (is_lp_cluster() == 1) ? 0 : cpu_online(0),
			     cpu_online(1), cpu_online(2), cpu_online(3));
	else
		hotplug_info("%s Mask=[%d.%d%d%d%d]\n",
			     extra, is_lp_cluster(),
			     (is_lp_cluster() == 1) ? 0 : cpu_online(0),
			     cpu_online(1), cpu_online(2), cpu_online(3));
}
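
For illustration, the first branch would produce a line such as "LP -> off - force Mask=[1.0000]|lp_on_time=5120" (values hypothetical): the LP-cluster flag, the online state of CPUs 0-3, and the LP residency in milliseconds.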
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
    unsigned long up_delay, top_freq, bottom_freq;

    if (!is_g_cluster_present())
        return;

    if (hp_state == TEGRA_HP_DISABLED)
        return;

    if (suspend) {
        hp_state = TEGRA_HP_IDLE;

        /* Switch to G-mode if suspend rate is high enough */
        if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
            if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                hp_stats_update(CONFIG_NR_CPUS, false);
                hp_stats_update(0, true);
            }
        }
        return;
    }

    if (is_lp_cluster()) {
        up_delay = up2g0_delay;
        top_freq = idle_top_freq;
        bottom_freq = 0;
    } else {
        up_delay = up2gn_delay;
        top_freq = idle_bottom_freq;
        bottom_freq = idle_bottom_freq;
    }

    if (pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
        if (hp_state != TEGRA_HP_UP) {
            hp_state = TEGRA_HP_UP;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, up_delay);
        }
        return;
    }

    switch (hp_state) {
    case TEGRA_HP_IDLE:
        if (cpu_freq > top_freq) {
            hp_state = TEGRA_HP_UP;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, up_delay);
        } else if (cpu_freq <= bottom_freq) {
            hp_state = TEGRA_HP_DOWN;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, down_delay);
        }
        break;
    case TEGRA_HP_DOWN:
        if (cpu_freq > top_freq) {
            hp_state = TEGRA_HP_UP;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, up_delay);
        } else if (cpu_freq > bottom_freq) {
            hp_state = TEGRA_HP_IDLE;
        }
        break;
    case TEGRA_HP_UP:
        if (cpu_freq <= bottom_freq) {
            hp_state = TEGRA_HP_DOWN;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, down_delay);
        } else if (cpu_freq <= top_freq) {
            hp_state = TEGRA_HP_IDLE;
        }
        break;
    default:
        pr_err("%s: invalid tegra hotplug state %d\n",
               __func__, hp_state);
        BUG();
    }
}
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
    bool up = false;
    unsigned int cpu = nr_cpu_ids;
    unsigned long now = jiffies;
    static unsigned long last_change_time;

    mutex_lock(tegra3_cpu_lock);

    switch (hp_state) {
    case TEGRA_HP_DISABLED:
    case TEGRA_HP_IDLE:
        break;
    case TEGRA_HP_DOWN:
        cpu = tegra_get_slowest_cpu_n();
        if (cpu < nr_cpu_ids) {
            up = false;
        } else if (!is_lp_cluster() && !no_lp &&
                   !pm_qos_request(PM_QOS_MIN_ONLINE_CPUS)) {
            if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
                hp_stats_update(CONFIG_NR_CPUS, true);
                hp_stats_update(0, false);
                /* catch-up with governor target speed */
                tegra_cpu_set_speed_cap(NULL);
                break;
            }
        }
        queue_delayed_work(
            hotplug_wq, &hotplug_work, down_delay);
        break;
    case TEGRA_HP_UP:
        if (is_lp_cluster() && !no_lp) {
            if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                hp_stats_update(CONFIG_NR_CPUS, false);
                hp_stats_update(0, true);
                /* catch-up with governor target speed */
                tegra_cpu_set_speed_cap(NULL);
            }
        } else {
            switch (tegra_cpu_speed_balance()) {
            /* cpu speed is up and balanced - one more on-line */
            case TEGRA_CPU_SPEED_BALANCED:
                cpu = cpumask_next_zero(0, cpu_online_mask);
                if (cpu < nr_cpu_ids)
                    up = true;
                break;
            /* cpu speed is up, but skewed - remove one core */
            case TEGRA_CPU_SPEED_SKEWED:
                cpu = tegra_get_slowest_cpu_n();
                if (cpu < nr_cpu_ids)
                    up = false;
                break;
            /* cpu speed is up, but under-utilized - do nothing */
            case TEGRA_CPU_SPEED_BIASED:
            default:
                break;
            }
        }
        queue_delayed_work(
            hotplug_wq, &hotplug_work, up2gn_delay);
        break;
    default:
        pr_err("%s: invalid tegra hotplug state %d\n",
               __func__, hp_state);
    }

    if (!up && ((now - last_change_time) < down_delay))
        cpu = nr_cpu_ids;

    if (cpu < nr_cpu_ids) {
        last_change_time = now;
        hp_stats_update(cpu, up);
    }
    mutex_unlock(tegra3_cpu_lock);

    if (cpu < nr_cpu_ids) {
        if (up) {
            printk("cpu_up(%u)+\n",cpu);
            cpu_up(cpu);
            printk("cpu_up(%u)-\n",cpu);
        } else {
            printk("cpu_down(%u)+\n",cpu);
            cpu_down(cpu);
            printk("cpu_down(%u)-\n",cpu);
        }
    }
}
Example 30
void tegra_init_cache(bool init)
{
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	u32 aux_ctrl;
	u32 speedo;
	u32 tmp;

#ifdef CONFIG_TRUSTED_FOUNDATIONS
	/* issue the SMC to enable the L2 */
	aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
	tegra_cache_smc(true, aux_ctrl);

	/* after init, reread aux_ctrl and register handlers */
	aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
	l2x0_init(p, aux_ctrl, 0xFFFFFFFF);

	/* override outer_disable() with our disable */
	outer_cache.disable = tegra_l2x0_disable;
#else
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
	writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);

#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	/* PL310 RAM latency is CPU dependent. NOTE: Changes here
	   must also be reflected in __cortex_a9_l2x0_restart */

	if (is_lp_cluster()) {
		writel(0x221, p + L2X0_TAG_LATENCY_CTRL);
		writel(0x221, p + L2X0_DATA_LATENCY_CTRL);
	} else {
		/* relax l2-cache latency for speedos 4,5,6 (T33's chips) */
		speedo = tegra_cpu_speedo_id();
		if (speedo == 4 || speedo == 5 || speedo == 6 ||
		    speedo == 12 || speedo == 13) {
			writel(0x442, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x552, p + L2X0_DATA_LATENCY_CTRL);
		} else {
			writel(0x441, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x551, p + L2X0_DATA_LATENCY_CTRL);
		}
	}
#else
	writel(0x770, p + L2X0_TAG_LATENCY_CTRL);
	writel(0x770, p + L2X0_DATA_LATENCY_CTRL);
#endif
#endif
	aux_ctrl = readl(p + L2X0_CACHE_TYPE);
	aux_ctrl = (aux_ctrl & 0x700) << (17-8);
	aux_ctrl |= 0x7C000001;
	if (init) {
		l2x0_init(p, aux_ctrl, 0x8200c3fe);
	} else {
		tmp = aux_ctrl;
		aux_ctrl = readl(p + L2X0_AUX_CTRL);
		aux_ctrl &= 0x8200c3fe;
		aux_ctrl |= tmp;
		writel(aux_ctrl, p + L2X0_AUX_CTRL);
	}
	l2x0_enable();
#endif
}