Example No. 1
static int tegra3_get_core_floor_mv(int cpu_mv)
{
#if 0
	if (cpu_mv < 800)
		return  950;
	if (cpu_mv < 900)
		return 1000;
	if (cpu_mv < 1000)
		return 1100;
	if ((tegra_cpu_speedo_id() < 2) ||
	    (tegra_cpu_speedo_id() == 4) ||
	    (tegra_cpu_speedo_id() == 7) ||
	    (tegra_cpu_speedo_id() == 8))
		return 1200;
	if (cpu_mv < 1100)
		return 1200;
	if (cpu_mv <= 1250)
		return 1300;
#endif
	int i;

	/* walk the core voltage ladder to the lowest step that is >= cpu_mv */
	for (i = 0; i < ARRAY_SIZE(core_millivolts) &&
	     core_millivolts[i] < cpu_mv; i++)
		;
	if (i < ARRAY_SIZE(core_millivolts))
		return core_millivolts[i];
	
	/* fail-safe */
	if (cpu_mv <= VDD_CPU_MAX)
		return 1300;

	BUG();
}
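The lookup loop above walks a core_millivolts[] ladder that this snippet does not show. A minimal sketch of such a table, with illustrative values only (the real ladder lives in the Tegra3 dvfs code):

/* Illustrative only: an ascending ladder of supported VDD_CORE steps, in mV.
 * Length and values here are assumptions made for the example. */
static const int core_millivolts[] = {
	950, 1000, 1050, 1100, 1150, 1200, 1250, 1300,
};

With a ladder like this, a cpu_mv of 1010 stops the search at the 1050 entry, the first core step that is not below the requested CPU voltage.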
Example No. 2
static int tegra3_get_core_floor_mv(int cpu_mv)
{
	if (cpu_mv <= 825)
		return 1000;
	if (cpu_mv <=  975)
		return 1100;
	if ((tegra_cpu_speedo_id() < 2) ||
	    (tegra_cpu_speedo_id() == 4))
		return 1200;
	if (cpu_mv <= 1075)
		return 1200;
	if (cpu_mv <= 1250)
		return 1300;
	BUG();
}
Example No. 3
static bool __init is_pllm_dvfs(struct clk *c, struct dvfs *d)
{
#ifdef CONFIG_TEGRA_PLLM_RESTRICTED
	/* Do not apply common PLLM dvfs table on T30 and T33, rev A02+ and
	   do not apply restricted PLLM dvfs table for other SKUs/revs */
	if (((tegra_cpu_speedo_id() == 2) || (tegra_cpu_speedo_id() == 5)) ==
	    (d->speedo_id == -1))
		return false;
#endif
	/* Check if PLLM boot frequency can be applied to clock tree at
	   minimum voltage. If yes, no need to enable dvfs on PLLM */
	if (clk_get_rate_all_locked(c) <= d->freqs[0] * d->freqs_mult)
		return false;

	return true;
}
Example No. 4
static bool __init is_pllm_dvfs(struct clk *c, struct dvfs *d)
{
#ifdef CONFIG_TEGRA_PLLM_RESTRICTED
	/* Restricting PLLM usage on T30 and T33, rev A02+, makes it possible to
	   apply the maximum PLLM frequency to the clock tree at minimum core
	   voltage; no need to enable dvfs on PLLM in this case */
	if ((tegra_cpu_speedo_id() == 2) || (tegra_cpu_speedo_id() == 5))
		return false;
#endif
	/* Check if PLLM boot frequency can be applied to clock tree at
	   minimum voltage. If yes, no need to enable dvfs on PLLM */
	if (clk_get_rate_all_locked(c) <= d->freqs[0] * d->freqs_mult)
		return false;

	return true;
}
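For context, a hypothetical caller could use this predicate while initializing the core dvfs tables; only is_pllm_dvfs() itself comes from the snippet, the rest is an illustrative sketch.

/* Sketch of a possible caller: register a PLLM dvfs entry only when the
 * predicate says dvfs is actually needed. init_dvfs_one() appears elsewhere
 * in these examples; the 0 index is purely illustrative. */
static void __init maybe_init_pllm_dvfs(struct clk *c, struct dvfs *d)
{
	if (!is_pllm_dvfs(c, d))
		return;		/* PLLM boot rate fits at minimum voltage */

	init_dvfs_one(d, 0);
}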
Example No. 5
void tegra_init_cache(bool init)
{
#ifdef CONFIG_TRUSTED_FOUNDATIONS
	/* enable/re-enable of L2 handled by secureos */
	return tegra_init_cache_tz(init);
#else
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	u32 aux_ctrl;

#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
	writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);

#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	/* PL310 RAM latency is CPU dependent. NOTE: Changes here
	   must also be reflected in __cortex_a9_l2x0_restart */

	if (is_lp_cluster()) {
		writel(0x221, p + L2X0_TAG_LATENCY_CTRL);
		writel(0x221, p + L2X0_DATA_LATENCY_CTRL);
	} else {
		u32 speedo;

		/* relax l2-cache latency for speedos 4,5,6 (T33's chips) */
		speedo = tegra_cpu_speedo_id();
		if (speedo == 4 || speedo == 5 || speedo == 6 ||
		    speedo == 12 || speedo == 13) {
			writel(0x442, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x552, p + L2X0_DATA_LATENCY_CTRL);
		} else {
			writel(0x441, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x551, p + L2X0_DATA_LATENCY_CTRL);
		}
	}
#else
	writel(0x770, p + L2X0_TAG_LATENCY_CTRL);
	writel(0x770, p + L2X0_DATA_LATENCY_CTRL);
#endif
#endif
	writel(0x3, p + L2X0_POWER_CTRL);
	aux_ctrl = readl(p + L2X0_CACHE_TYPE);
	aux_ctrl = (aux_ctrl & 0x700) << (17-8);
	aux_ctrl |= 0x7C000001;
	if (init) {
		l2x0_init(p, aux_ctrl, 0x8200c3fe);
		/* use our outer_disable() routine to avoid flush */
		outer_cache.disable = tegra_l2x0_disable;
	} else {
		u32 tmp;

		tmp = aux_ctrl;
		aux_ctrl = readl(p + L2X0_AUX_CTRL);
		aux_ctrl &= 0x8200c3fe;
		aux_ctrl |= tmp;
		writel(aux_ctrl, p + L2X0_AUX_CTRL);
	}
	l2x0_enable();
#endif
}
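The aux_ctrl derivation above repacks a field read from the PL310 cache-type register into the auxiliary-control value. A quick walk-through of the bit arithmetic, using a made-up read-back value:

/*
 * Illustrative only (0x1C100100 is not a real read-back):
 *
 *   readl(p + L2X0_CACHE_TYPE)   -> 0x1C100100
 *   0x1C100100 & 0x700           ->      0x100   keep bits [10:8]
 *   0x100 << (17 - 8)            ->    0x20000   move them to bits [19:17]
 *   0x20000 | 0x7C000001         -> 0x7C020001   OR in the fixed feature/enable bits
 */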
Example No. 6
static int tegra3_get_core_floor_mv(int cpu_mv)
{
	if (cpu_mv < 800)
		return  950;
	if (cpu_mv < 1000)
		return 1100;
	if ((tegra_cpu_speedo_id() < 2) ||
	    (tegra_cpu_speedo_id() == 4) ||
	    (tegra_cpu_speedo_id() == 7) ||
	    (tegra_cpu_speedo_id() == 8))
		return 1200;
	if (cpu_mv < 1100)
		return 1200;
	if (cpu_mv <= 1250)
		return 1300;
	BUG();
}
Example No. 7
/**
 * Adjust VDD_CPU to offset aging.
 * 25mV for 1st year
 * 12mV for 2nd and 3rd year
 * 0mV for 4th year onwards
 */
void tegra_dvfs_age_cpu(int cur_linear_age)
{
	int chip_linear_age;
	int chip_life;
	chip_linear_age = tegra_get_age();
	chip_life = cur_linear_age - chip_linear_age;

	/*For T37 and AP37*/
	if (tegra_cpu_speedo_id() == 12 || tegra_cpu_speedo_id() == 13) {
		if (chip_linear_age <= 0) {
			return;
		} else if (chip_life <= 12) {
			tegra_adjust_cpu_mvs(25);
		} else if (chip_life <= 36) {
			tegra_adjust_cpu_mvs(13);
		}
	}
}
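A minimal sketch of how this hook might be driven, assuming the linear age is counted in months (the 12/36 bounds suggest that, but the unit is not stated in the snippet):

/* Sketch only: the function name and the month unit are assumptions.
 * If tegra_get_age() reported 100 and the current linear age is 120,
 * chip_life comes out as 20 months, so a T37/AP37 part (speedo id 12/13)
 * gets the 13 mV second/third-year adjustment. */
static void example_apply_aging(void)
{
	int current_linear_age = 120;	/* hypothetical "now" */

	tegra_dvfs_age_cpu(current_linear_age);
}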
Example No. 8
/*
 * Specify regulator current in mA, e.g. 5000mA
 * Use 0 for default
 */
void __init tegra_init_cpu_edp_limits(unsigned int regulator_mA)
{
	int cpu_speedo_id = tegra_cpu_speedo_id();
	int i, j;
	struct tegra_edp_limits *e;
	struct tegra_edp_entry *t = (struct tegra_edp_entry *)tegra_edp_map;
	int tsize = sizeof(tegra_edp_map)/sizeof(struct tegra_edp_entry);

	if (!regulator_mA) {
		edp_limits = edp_default_limits;
		edp_limits_size = ARRAY_SIZE(edp_default_limits);
		return;
	}
	regulator_cur = regulator_mA;

	for (i = 0; i < tsize; i++) {
		if (t[i].speedo_id == cpu_speedo_id &&
		    t[i].regulator_100mA <= regulator_mA / 100)
			break;
	}

	/* No entry found in tegra_edp_map */
	if (i >= tsize) {
		edp_limits = edp_default_limits;
		edp_limits_size = ARRAY_SIZE(edp_default_limits);
		return;
	}

	/* Find all rows for this entry */
	for (j = i + 1; j < tsize; j++) {
		if (t[i].speedo_id != t[j].speedo_id ||
		    t[i].regulator_100mA != t[j].regulator_100mA)
			break;
	}

	edp_limits_size = j - i;
	e = kmalloc(sizeof(struct tegra_edp_limits) * edp_limits_size,
		    GFP_KERNEL);
	BUG_ON(!e);

	for (j = 0; j < edp_limits_size; j++) {
		e[j].temperature = (int)t[i+j].temperature;
#ifdef CONFIG_TEGRA3_GAMING_FIX
		e[j].freq_limits[0] = (unsigned int)(t[i+j].freq_limits[0] - GAMING_REDUCTION_FREQ) * 10000;
#else
		e[j].freq_limits[0] = (unsigned int)t[i+j].freq_limits[0] * 10000;
#endif
		e[j].freq_limits[1] = (unsigned int)t[i+j].freq_limits[1] * 10000;
		e[j].freq_limits[2] = (unsigned int)t[i+j].freq_limits[2] * 10000;
		e[j].freq_limits[3] = (unsigned int)t[i+j].freq_limits[3] * 10000;
	}

	if (edp_limits != edp_default_limits)
		kfree(edp_limits);

	edp_limits = e;
}
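As the comment says, callers pass the VDD_CPU regulator limit in mA, or 0 to fall back to the defaults. A board-file sketch; the 5000 mA figure is the one from the comment, not a real board value:

/* Hypothetical board init hook; only tegra_init_cpu_edp_limits() is from
 * the snippet above. */
static void __init my_board_edp_init(void)
{
	tegra_init_cpu_edp_limits(5000);	/* 5 A VDD_CPU regulator */
}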
Example No. 9
static int tegra3_get_core_floor_mv(int cpu_mv)
{
	if (cpu_mv < 800)
		return core_millivolts[0];
	if (cpu_mv < 900)
		return core_millivolts[1];
	if (cpu_mv < 1000)
		return core_millivolts[3];
	if ((tegra_cpu_speedo_id() < 2) ||
	    (tegra_cpu_speedo_id() == 4) ||
	    (tegra_cpu_speedo_id() == 7) ||
	    (tegra_cpu_speedo_id() == 8))
		return core_millivolts[5];
	if (cpu_mv < 1100)
		return core_millivolts[5];
	if (cpu_mv <= 1250)
		return core_millivolts[7];
	BUG();
}
Example No. 10
static int t3_variant_debugfs_show(struct seq_file *s, void *data)
{
	int cpu_speedo_id = tegra_cpu_speedo_id();
	int soc_speedo_id = tegra_soc_speedo_id();
	int cpu_process_id = tegra_cpu_process_id();
	int core_process_id = tegra_core_process_id();

	seq_printf(s, "cpu_speedo_id => %d\n", cpu_speedo_id);
	seq_printf(s, "soc_speedo_id => %d\n", soc_speedo_id);
	seq_printf(s, "cpu_process_id => %d\n", cpu_process_id);
	seq_printf(s, "core_process_id => %d\n", core_process_id);

	return 0;
}
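A seq_file show routine like this is normally exposed through debugfs via the single_open() helpers; a sketch of that plumbing, assuming <linux/debugfs.h> and <linux/seq_file.h> are included (the file name and NULL parent are illustrative):

static int t3_variant_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, t3_variant_debugfs_show, inode->i_private);
}

static const struct file_operations t3_variant_debugfs_fops = {
	.open		= t3_variant_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* e.g. debugfs_create_file("t3_variant", S_IRUGO, NULL, NULL,
 *			    &t3_variant_debugfs_fops); */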
Example No. 11
static int __init init_cpu_edp_limits_lookup(void)
{
	int i, j;
	struct tegra_edp_limits *e;
	struct tegra_edp_vdd_cpu_entry *t;
	int tsize;
	int cpu_speedo_id = tegra_cpu_speedo_id();

	t = (struct tegra_edp_vdd_cpu_entry *)tegra_edp_vdd_cpu_map;
	tsize = sizeof(tegra_edp_vdd_cpu_map)
		/ sizeof(struct tegra_edp_vdd_cpu_entry);

	for (i = 0; i < tsize; i++) {
		if (t[i].speedo_id == cpu_speedo_id &&
		    t[i].regulator_100mA <= regulator_cur / 100)
			break;
	}

	/* No entry found in tegra_edp_vdd_cpu_map */
	if (i >= tsize)
		return -EINVAL;

	/* Find all rows for this entry */
	for (j = i + 1; j < tsize; j++) {
		if (t[i].speedo_id != t[j].speedo_id ||
		    t[i].regulator_100mA != t[j].regulator_100mA)
			break;
	}

	edp_limits_size = j - i;
	e = kmalloc(sizeof(struct tegra_edp_limits) * edp_limits_size,
		    GFP_KERNEL);
	BUG_ON(!e);

	for (j = 0; j < edp_limits_size; j++) {
		e[j].temperature = (int)t[i+j].temperature;
		e[j].freq_limits[0] = (unsigned int)t[i+j].freq_limits[0]*10000;
		e[j].freq_limits[1] = (unsigned int)t[i+j].freq_limits[1]*10000;
		e[j].freq_limits[2] = (unsigned int)t[i+j].freq_limits[2]*10000;
		e[j].freq_limits[3] = (unsigned int)t[i+j].freq_limits[3]*10000;
	}

	if (edp_limits != edp_default_limits)
		kfree(edp_limits);

	edp_limits = e;
	return 0;
}
Example No. 12
void __init tegra_init_system_edp_limits(unsigned int power_limit_mW)
{
	int cpu_speedo_id = tegra_cpu_speedo_id();
	int i;
	unsigned int *e;
	struct system_edp_entry *t =
		(struct system_edp_entry *)tegra_system_edp_map;
	int tsize = sizeof(tegra_system_edp_map) /
		sizeof(struct system_edp_entry);

	if (!power_limit_mW) {
		e = NULL;
		goto out;
	}

	for (i = 0; i < tsize; i++)
		if (t[i].speedo_id == cpu_speedo_id)
			break;

	if (i >= tsize) {
		e = NULL;
		goto out;
	}

	do {
		if (t[i].power_limit_100mW <= power_limit_mW / 100)
			break;
		i++;
	} while (i < tsize && t[i].speedo_id == cpu_speedo_id);

	if (i >= tsize || t[i].speedo_id != cpu_speedo_id)
		i--; /* No low enough entry in the table, use best possible */

	e = kmalloc(sizeof(unsigned int) * 4, GFP_KERNEL);
	BUG_ON(!e);

	e[0] = (unsigned int)t[i].freq_limits[0] * 10000;
	e[1] = (unsigned int)t[i].freq_limits[1] * 10000;
	e[2] = (unsigned int)t[i].freq_limits[2] * 10000;
	e[3] = (unsigned int)t[i].freq_limits[3] * 10000;

out:
	kfree(system_edp_limits);

	system_edp_limits = e;
}
Example No. 13
static int tegra3_get_core_floor_mv(int cpu_mv)
{
	if (cpu_mv <= 800)
		return 950;
	if (cpu_mv <= 900)
		return 1000;
	if (cpu_mv <= 1000)
		return 1100;
	if (tegra_cpu_speedo_id() == 4)
		return 1200;
	if (cpu_mv <= 1100)
		return 1200;
	if (cpu_mv <= 1250)
		return 1300;
	if (cpu_mv <= 1325)
		return 1350;
	if (cpu_mv <= 1375)
		return 1400;
	BUG();
}
Example No. 14
struct tegra_sysedp_corecap *tegra_get_sysedp_corecap(unsigned int *sz)
{
	int cpu_speedo_id;
	int gpu_speedo_id;

	BUG_ON(sz == NULL);

	cpu_speedo_id = tegra_cpu_speedo_id();
	gpu_speedo_id = tegra_gpu_speedo_id();

	switch (cpu_speedo_id) {
	case 0x5:
	case 0x2:
		if (gpu_speedo_id == 1) {
			/* 575 variants */
			*sz = ARRAY_SIZE(td575d_sysedp_corecap);
			return td575d_sysedp_corecap;
		} else {
			/* CD570M */
			*sz = ARRAY_SIZE(cd570m_sysedp_corecap);
			return cd570m_sysedp_corecap;
		}

	case 0x3:
	case 0x1:
		/* 580 variants */
		*sz = ARRAY_SIZE(td580d_sysedp_corecap);
		return td580d_sysedp_corecap;


	default:
		pr_warn("%s: Unknown cpu_speedo_id, 0x%x. "
			" Assuming td570d sysedp_corecap table.\n",
			__func__, cpu_speedo_id);
		/* intentional fall-through */
	case 0x0:
		/* 570 variants */
		*sz = ARRAY_SIZE(td570d_sysedp_corecap);
		return td570d_sysedp_corecap;
	}
}
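A minimal caller sketch, assuming nothing about struct tegra_sysedp_corecap beyond what the snippet shows; only tegra_get_sysedp_corecap() comes from the code above:

static void example_lookup_corecap(void)
{
	unsigned int n;
	struct tegra_sysedp_corecap *cap = tegra_get_sysedp_corecap(&n);

	pr_info("sysedp corecap: %u entries at %p\n", n, cap);
}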
Example No. 15
File: edp.c Project: Arvoreen/ATnT
/*
 * Specify regulator current in mA, e.g. 5000mA
 * Use 0 for default
 */
void __init tegra_init_cpu_edp_limits(unsigned int regulator_mA)
{
	int cpu_speedo_id = tegra_cpu_speedo_id();
	int i, j;
	struct tegra_edp_limits *e;
	struct tegra_edp_limits *f;
	struct tegra_edp_entry *t = (struct tegra_edp_entry *)tegra_edp_map;
	int tsize = sizeof(tegra_edp_map)/sizeof(struct tegra_edp_entry);

	if (!regulator_mA) {
		edp_limits = edp_default_limits;
		edp_limits_size = ARRAY_SIZE(edp_default_limits);
		return;
	}
	regulator_cur = regulator_mA;

	for (i = 0; i < tsize; i++) {
		if (t[i].speedo_id == cpu_speedo_id &&
		    t[i].regulator_100mA <= regulator_mA / 100)
			break;
	}

	/* No entry found in tegra_edp_map */
	if (i >= tsize) {
		edp_limits = edp_default_limits;
		edp_limits_size = ARRAY_SIZE(edp_default_limits);
		return;
	}

	/* Find all rows for this entry */
	for (j = i + 1; j < tsize; j++) {
		if (t[i].speedo_id != t[j].speedo_id ||
		    t[i].regulator_100mA != t[j].regulator_100mA)
			break;
	}

	edp_limits_size = j - i;
	e = kmalloc(sizeof(struct tegra_edp_limits) * edp_limits_size,
		    GFP_KERNEL);
	BUG_ON(!e);

	for (j = 0; j < edp_limits_size; j++) {
		e[j].temperature = (int)t[i+j].temperature;
		e[j].freq_limits[0] = (unsigned int)t[i+j].freq_limits[0] * 10000;
		e[j].freq_limits[1] = (unsigned int)(t[i+j].freq_limits[1] + 10) * 10000;
		e[j].freq_limits[2] = (unsigned int)(t[i+j].freq_limits[2] + 10) * 10000;
		e[j].freq_limits[3] = (unsigned int)(t[i+j].freq_limits[3] + 10) * 10000;
	}

	f = kmalloc(sizeof(struct tegra_edp_limits) * edp_limits_size,
		    GFP_KERNEL);
	BUG_ON(!f);

	memcpy(f, e, sizeof(struct tegra_edp_limits) * edp_limits_size);

	BUG_ON(MAX_TEGRA_EDP_LIMITS < edp_limits_size);

	memcpy(edp_limits_table, e, sizeof(struct tegra_edp_limits) * edp_limits_size);

	if (edp_limits != edp_default_limits)
		kfree(edp_limits);

	default_table = f;
	edp_limits = e;
}
Example No. 16
void __init tegra_soc_init_dvfs(void)
{
	int cpu_speedo_id = tegra_cpu_speedo_id();
	int soc_speedo_id = tegra_soc_speedo_id();
	int cpu_process_id = tegra_cpu_process_id();
#ifdef CONFIG_TEGRA3_LP_CORE_OVERDRIVE
	int core_process_id = 2;
#else
	int core_process_id = tegra_core_process_id();
#endif

	int i;
	int core_nominal_mv_index;
	int cpu_nominal_mv_index;

#ifndef CONFIG_TEGRA_CORE_DVFS
	tegra_dvfs_core_disabled = true;
#endif
#ifndef CONFIG_TEGRA_CPU_DVFS
	tegra_dvfs_cpu_disabled = true;
#endif

	/*
	 * Find nominal voltages for core (1st) and cpu rails before rail
	 * init. Nominal voltage index in the scaling ladder will also be
	 * used to determine max dvfs frequency for the respective domains.
	 */
	core_nominal_mv_index = get_core_nominal_mv_index(soc_speedo_id);
	if (core_nominal_mv_index < 0) {
		tegra3_dvfs_rail_vdd_core.disabled = true;
		tegra_dvfs_core_disabled = true;
		core_nominal_mv_index = 0;
	}
	tegra3_dvfs_rail_vdd_core.nominal_millivolts =
		core_millivolts[core_nominal_mv_index];

	cpu_nominal_mv_index = get_cpu_nominal_mv_index(
		cpu_speedo_id, cpu_process_id, &cpu_dvfs);
	BUG_ON((cpu_nominal_mv_index < 0) || (!cpu_dvfs));
	tegra3_dvfs_rail_vdd_cpu.nominal_millivolts =
		cpu_millivolts[cpu_nominal_mv_index];

	/* Init rail structures and dependencies */
	tegra_dvfs_init_rails(tegra3_dvfs_rails, ARRAY_SIZE(tegra3_dvfs_rails));
	tegra_dvfs_add_relationships(tegra3_dvfs_relationships,
		ARRAY_SIZE(tegra3_dvfs_relationships));

	/* Search core dvfs table for speedo/process matching entries and
	   initialize dvfs-ed clocks */
	for (i = 0; i <  ARRAY_SIZE(core_dvfs_table); i++) {
		struct dvfs *d = &core_dvfs_table[i];
		if (!match_dvfs_one(d, soc_speedo_id, core_process_id))
			continue;
		init_dvfs_one(d, core_nominal_mv_index);
	}

	/* Initialize matching cpu dvfs entry already found when nominal
	   voltage was determined */
	init_dvfs_one(cpu_dvfs, cpu_nominal_mv_index);
	init_dvfs_cold(cpu_dvfs, cpu_nominal_mv_index);

	/* Finally disable dvfs on rails if necessary */
	if (tegra_dvfs_core_disabled)
		tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_core);
	if (tegra_dvfs_cpu_disabled)
		tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_cpu);

	pr_info("tegra dvfs: VDD_CPU nominal %dmV, scaling %s\n",
		tegra3_dvfs_rail_vdd_cpu.nominal_millivolts,
		tegra_dvfs_cpu_disabled ? "disabled" : "enabled");
	pr_info("tegra dvfs: VDD_CORE nominal %dmV, scaling %s\n",
		tegra3_dvfs_rail_vdd_core.nominal_millivolts,
		tegra_dvfs_core_disabled ? "disabled" : "enabled");
}
Example No. 17
void tegra_init_cache(bool init)
{
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	u32 aux_ctrl;
	u32 speedo;
	u32 tmp;

#ifdef CONFIG_TRUSTED_FOUNDATIONS
	/* issue the SMC to enable the L2 */
	aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
	tegra_cache_smc(true, aux_ctrl);

	/* after init, reread aux_ctrl and register handlers */
	aux_ctrl = readl_relaxed(p + L2X0_AUX_CTRL);
	l2x0_init(p, aux_ctrl, 0xFFFFFFFF);

	/* override outer_disable() with our disable */
	outer_cache.disable = tegra_l2x0_disable;
#else
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	writel_relaxed(0x331, p + L2X0_TAG_LATENCY_CTRL);
	writel_relaxed(0x441, p + L2X0_DATA_LATENCY_CTRL);

#elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
#ifdef CONFIG_TEGRA_SILICON_PLATFORM
	/* PL310 RAM latency is CPU dependent. NOTE: Changes here
	   must also be reflected in __cortex_a9_l2x0_restart */

	if (is_lp_cluster()) {
		writel(0x221, p + L2X0_TAG_LATENCY_CTRL);
		writel(0x221, p + L2X0_DATA_LATENCY_CTRL);
	} else {
		/* relax l2-cache latency for speedos 4,5,6 (T33's chips) */
		speedo = tegra_cpu_speedo_id();
		if (speedo == 4 || speedo == 5 || speedo == 6 ||
		    speedo == 12 || speedo == 13) {
			writel(0x442, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x552, p + L2X0_DATA_LATENCY_CTRL);
		} else {
			writel(0x441, p + L2X0_TAG_LATENCY_CTRL);
			writel(0x551, p + L2X0_DATA_LATENCY_CTRL);
		}
	}
#else
	writel(0x770, p + L2X0_TAG_LATENCY_CTRL);
	writel(0x770, p + L2X0_DATA_LATENCY_CTRL);
#endif
#endif
	aux_ctrl = readl(p + L2X0_CACHE_TYPE);
	aux_ctrl = (aux_ctrl & 0x700) << (17-8);
	aux_ctrl |= 0x7C000001;
	if (init) {
		l2x0_init(p, aux_ctrl, 0x8200c3fe);
	} else {
		tmp = aux_ctrl;
		aux_ctrl = readl(p + L2X0_AUX_CTRL);
		aux_ctrl &= 0x8200c3fe;
		aux_ctrl |= tmp;
		writel(aux_ctrl, p + L2X0_AUX_CTRL);
	}
	l2x0_enable();
#endif
}
Example No. 18
static int init_cpu_edp_limits_calculated(void)
{
	unsigned int max_nr_cpus = num_possible_cpus();
	unsigned int temp_idx, n_cores_idx, pwr_idx;
	unsigned int cpu_g_minf, cpu_g_maxf;
	unsigned int iddq_mA;
	unsigned int cpu_speedo_idx;
	unsigned int cap, limit;
	struct tegra_edp_limits *edp_calculated_limits;
	struct tegra_system_edp_entry *power_edp_calc_limits;
	struct tegra_edp_cpu_leakage_params *params;
	int ret;
	struct clk *clk_cpu_g = tegra_get_clock_by_name("cpu_g");
	int cpu_speedo_id = tegra_cpu_speedo_id();

	/* Determine all inputs to EDP formula */
	iddq_mA = tegra_get_cpu_iddq_value();
	ret = edp_find_speedo_idx(cpu_speedo_id, &cpu_speedo_idx);
	if (ret)
		return ret;

	switch (tegra_chip_id) {
	case TEGRA_CHIPID_TEGRA11:
		params = tegra11x_get_leakage_params(cpu_speedo_idx, NULL);
		break;
	case TEGRA_CHIPID_TEGRA3:
	case TEGRA_CHIPID_TEGRA2:
	default:
		return -EINVAL;
	}

	edp_calculated_limits = kmalloc(sizeof(struct tegra_edp_limits)
					* ARRAY_SIZE(temperatures), GFP_KERNEL);
	BUG_ON(!edp_calculated_limits);

	power_edp_calc_limits = kmalloc(sizeof(struct tegra_system_edp_entry)
				* ARRAY_SIZE(power_cap_levels), GFP_KERNEL);
	BUG_ON(!power_edp_calc_limits);

	cpu_g_minf = 0;
	cpu_g_maxf = clk_get_max_rate(clk_cpu_g);
	freq_voltage_lut_size = (cpu_g_maxf - cpu_g_minf) / FREQ_STEP + 1;
	freq_voltage_lut = kmalloc(sizeof(struct tegra_edp_freq_voltage_table)
				   * freq_voltage_lut_size, GFP_KERNEL);
	if (!freq_voltage_lut) {
		pr_err("%s: failed alloc mem for freq/voltage LUT\n", __func__);
		return -ENOMEM;
	}

	ret = edp_relate_freq_voltage(clk_cpu_g, cpu_speedo_idx,
				freq_voltage_lut_size, freq_voltage_lut);
	if (ret) {
		kfree(freq_voltage_lut);
		return ret;
	}

	if (freq_voltage_lut_size != freq_voltage_lut_size_saved) {
		/* release previous table if present */
		kfree(freq_voltage_lut_saved);
		/* create table to save */
		freq_voltage_lut_saved =
			kmalloc(sizeof(struct tegra_edp_freq_voltage_table) *
			freq_voltage_lut_size, GFP_KERNEL);
		if (!freq_voltage_lut_saved) {
			pr_err("%s: failed alloc mem for freq/voltage LUT\n",
				__func__);
			kfree(freq_voltage_lut);
			return -ENOMEM;
		}
		freq_voltage_lut_size_saved = freq_voltage_lut_size;
	}
	memcpy(freq_voltage_lut_saved,
		freq_voltage_lut,
		sizeof(struct tegra_edp_freq_voltage_table) *
			freq_voltage_lut_size);

	/* Calculate EDP table */
	for (n_cores_idx = 0; n_cores_idx < max_nr_cpus; n_cores_idx++) {
		for (temp_idx = 0;
		     temp_idx < ARRAY_SIZE(temperatures); temp_idx++) {
			edp_calculated_limits[temp_idx].temperature =
				temperatures[temp_idx];
			limit = edp_calculate_maxf(params,
						   temperatures[temp_idx],
						   -1,
						   iddq_mA,
						   n_cores_idx);
			if (limit == -EINVAL)
				return -EINVAL;
			/* apply safety cap if it is specified */
			if (n_cores_idx < 4) {
				cap = params->safety_cap[n_cores_idx];
				if (cap && cap < limit)
					limit = cap;
			}
			edp_calculated_limits[temp_idx].
				freq_limits[n_cores_idx] = limit;
		}

		for (pwr_idx = 0;
		     pwr_idx < ARRAY_SIZE(power_cap_levels); pwr_idx++) {
			power_edp_calc_limits[pwr_idx].power_limit_100mW =
				power_cap_levels[pwr_idx] / 100;
			limit = edp_calculate_maxf(params,
						   50,
						   power_cap_levels[pwr_idx],
						   iddq_mA,
						   n_cores_idx);
			if (limit == -EINVAL)
				return -EINVAL;
			power_edp_calc_limits[pwr_idx].
				freq_limits[n_cores_idx] = limit;
		}
	}

	/*
	 * If this is an EDP table update, need to overwrite old table.
	 * The old table's address must remain valid.
	 */
	if (edp_limits != edp_default_limits) {
		memcpy(edp_limits, edp_calculated_limits,
		       sizeof(struct tegra_edp_limits)
		       * ARRAY_SIZE(temperatures));
		kfree(edp_calculated_limits);
	} else {
		edp_limits = edp_calculated_limits;
		edp_limits_size = ARRAY_SIZE(temperatures);
	}

	if (power_edp_limits != power_edp_default_limits) {
		memcpy(power_edp_limits, power_edp_calc_limits,
		       sizeof(struct tegra_system_edp_entry)
		       * ARRAY_SIZE(power_cap_levels));
		kfree(power_edp_calc_limits);
	} else {
		power_edp_limits = power_edp_calc_limits;
		power_edp_limits_size = ARRAY_SIZE(power_cap_levels);
	}

	kfree(freq_voltage_lut);
	return 0;
}
Example No. 19
/*
 * Specify regulator current in mA, e.g. 5000mA
 * Use 0 for default
 */
void __init tegra_init_cpu_edp_limits(unsigned int regulator_mA)
{
	int cpu_speedo_id = tegra_cpu_speedo_id();
	int cpu_process_id = tegra_cpu_process_id();
	int i, j;
	struct tegra_edp_limits *e;
	struct tegra_edp_entry *t = (struct tegra_edp_entry *)tegra_edp_map;
	int tsize = sizeof(tegra_edp_map)/sizeof(struct tegra_edp_entry);

	if (!regulator_mA) {
		edp_limits = edp_default_limits;
		edp_limits_size = ARRAY_SIZE(edp_default_limits);
		return;
	}
	regulator_cur = regulator_mA;

	for (i = 0; i < tsize; i++) {
		if (t[i].speedo_id == cpu_speedo_id &&
		    t[i].regulator_100mA <= regulator_mA / 100)
			break;
	}

	/* No entry found in tegra_edp_map */
	if (i >= tsize) {
		edp_limits = edp_default_limits;
		edp_limits_size = ARRAY_SIZE(edp_default_limits);
		return;
	}

	/* Find all rows for this entry */
	for (j = i + 1; j < tsize; j++) {
		if (t[i].speedo_id != t[j].speedo_id ||
		    t[i].regulator_100mA != t[j].regulator_100mA)
			break;
	}

	edp_limits_size = j - i;
	e = kmalloc(sizeof(struct tegra_edp_limits) * edp_limits_size,
		    GFP_KERNEL);
	BUG_ON(!e);

#ifdef CONFIG_CPU_OVERCLOCK
	switch (cpu_process_id) {
	case 0:
		edpl0 = 20;
		edpl123 = 30;
		break;
	case 1:
		edpl0 = 20;
		edpl123 = 30;
		break;
	case 2:
		edpl0 = 20;
		edpl123 = 30;
		break;
	case 3:
	default:
		edpl0 = 20;
		edpl123 = 30;
		break;
	}
#endif

	for (j = 0; j < edp_limits_size; j++) {
#ifdef CONFIG_CPU_OVERCLOCK
		e[j].temperature = (int)t[i+j].temperature;
		e[j].freq_limits[0] = (unsigned int)(t[i+j].freq_limits[0]+edpl0) * 10000;
		e[j].freq_limits[1] = (unsigned int)(t[i+j].freq_limits[1]+edpl123) * 10000;
		e[j].freq_limits[2] = (unsigned int)(t[i+j].freq_limits[2]+edpl123) * 10000;
		e[j].freq_limits[3] = (unsigned int)(t[i+j].freq_limits[3]+edpl123) * 10000;
#else
		e[j].temperature = (int)t[i+j].temperature;
		e[j].freq_limits[0] = (unsigned int)t[i+j].freq_limits[0] * 10000;
		e[j].freq_limits[1] = (unsigned int)(t[i+j].freq_limits[1]+10) * 10000;
		e[j].freq_limits[2] = (unsigned int)(t[i+j].freq_limits[2]+10) * 10000;
		e[j].freq_limits[3] = (unsigned int)(t[i+j].freq_limits[3]+10) * 10000;
#endif
	}

	if (edp_limits != edp_default_limits)
		kfree(edp_limits);

	edp_limits = e;
}
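For scale, assuming the resulting limits are in kHz (the usual cpufreq unit), the * 10000 factor makes each table unit worth 10 MHz, so the offsets applied above map directly to frequency headroom:

/*
 * Unit check for the scaling above (the kHz result is an assumption):
 *
 *   table entry 130           -> 130 * 10000 = 1,300,000 kHz = 1.3 GHz
 *   overclock +20 (edpl0)     -> +200,000 kHz = +200 MHz on freq_limits[0]
 *   overclock +30 (edpl123)   -> +300,000 kHz = +300 MHz on freq_limits[1..3]
 *   default +10               -> +100,000 kHz = +100 MHz on freq_limits[1..3]
 */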